This file serves as a supplementary document that describes all the statistical results obtained for this project. It may help to address some new questions that are not included in the corresponding slides.
This file displays the results of the FaceWord project (data collected at NYU). There are two experiments in this project. In Experiment 1, Chinese participants viewed faces and Chinese characters in four conditions (Layout: intact, exchange [top and bottom parts were switched], top and bottom) and completed an additional localizer (Chinese faces, Chinese characters, objects, scrambled objects). In Experiment 2, English speakers viewed Chinese characters and English words in four conditions (Layout: intact, exchange, top [top parts of Chinese characters; left two letters for English words] and bottom [bottom parts of Chinese characters; right four letters for English words]) and completed an additional localizer (Caucasian faces, English words, objects, scrambled objects).
For the main runs, analysis is conducted for each ROI separately (FFA1, FFA2, VWFA, LOC).
For each ROI, three analyses are performed:
libsvm is used to decode different condition pairs (see below), and one-tailed one-sample t-tests are used to test whether each pair of conditions can be decoded [i.e., whether the accuracy is significantly larger than the chance level (0.5)]. Leave-one(-run)-out cross-validation is applied. No normalization or demeaning was used.
The probability was estimated for each participant separately:
libsvm is trained with the patterns of intact vs. exchange (10 runs).
# set the order of levels in factors
loc_order <- c("face", "object", "word", "scrambled")
faceword_order <- c("faces", "words")
words_order <- c("English", "Chinese")
layout_order <- c("intact", "exchange", "top", "bottom")
# NOTE(review): the last ROI here is "LO", while the text above refers to
# "LOC" -- presumably the same region; confirm.
roi_order <- c("FFA1", "FFA2", "VWFA", "LO")
# label file names for each ROI (left and right hemisphere)
label_FFA1 <- c("roi.lh.f-vs-o.ffa1.label", "roi.rh.f-vs-o.ffa1.label")
label_FFA2 <- c("roi.lh.f-vs-o.ffa2.label", "roi.rh.f-vs-o.ffa2.label")
# VWFA label exists for the left hemisphere only
label_VWFA <- "roi.lh.word-vs-face-object-scrambled.label"
label_LO <- c("roi.lh.o-vs-scr.label", "roi.rh.o-vs-scr.label")
# criterion of vertex number
# NOTE(review): despite the name, this threshold is compared against the
# label Size column (mm^2) below, not against the vertex count -- confirm.
nVtx_size_min <- 30 # mm^2
# set up the theme for plots and raincloud plots
# Load all the R files in "Utilities/" (helper functions such as plot_uni(),
# plot_decode(), and plot_simi() are presumably defined there -- confirm).
# The pattern argument of list.files() is a regular expression, so the
# glob-style "*.R" was incorrect; use an anchored regex instead.
tmp <- sapply(list.files('Utilities', "\\.R$", full.names = TRUE, recursive = TRUE), source)
# plotting parameters
activationUL <- 2.75 # presumably the upper limit of the activation axis -- confirm
onesample0 <- 0.3 # the starting point of y axis (for one-sample t-tests)
nDigitals <- 3 # number of digits of p-values in plots
# condition pairs decoded in Experiment 1 (display order for plots/tables)
pair_order_E1 <- c("face_intact-word_intact",
"face_intact-face_exchange",
"face_top-face_bottom",
"word_intact-word_exchange",
"word_top-word_bottom")
# Read the label metadata and derive a clean ROI name and Subject ID.
# The regex dots are now escaped and anchored: the previous patterns
# "roi." and ".label" treated "." as "match any character".
df_label <- read_csv(file.path("data", "faceword_E1_Label_HJ.csv")) %>%
mutate(roi = str_remove(Label, "^roi\\."),
roi = str_remove(roi, "\\.label$")) %>%
mutate(Subject = str_replace(SubjCode, "\\_.*", "")) # Subject = SubjCode before first "_"
# df_label %>% head()
# label Size (mm^2) per participant, one column per ROI (wide format)
df_label %>%
select(SubjCode, roi, Size) %>%
pivot_wider(names_from = roi, values_from = Size) %>%
arrange(SubjCode)
The above table displays the size (in mm^2) of each label for each participant. (NA denotes that this label is not available for that participant.)
# number of vertices (NVtxs) per label and participant (wide format)
df_label %>%
select(SubjCode, roi, NVtxs) %>%
pivot_wider(names_from = roi, values_from = NVtxs) %>%
arrange(SubjCode)
The above table displays the number of vertices for each label and each participant. (NA denotes that this label is not available for that participant.)
# descriptive statistics (count, mean/SD of size and vertex number) per label,
# before applying the size criterion
df_label %>%
group_by(Label, roi) %>%
summarize(Count = n(),
meanSize = mean(Size),
SDSize = sd(Size),
meanNVtx = mean(NVtxs),
SDNVtx = sd(NVtxs))
# the same summary after excluding labels with Size <= nVtx_size_min (mm^2);
# these counts are the participants entering the analyses below
df_nlabel <- df_label %>%
filter(Size > nVtx_size_min) %>%
group_by(Label, roi) %>%
summarize(Count = n(),
meanSize = mean(Size),
SDSize = sd(Size),
meanNVtx = mean(NVtxs),
SDNVtx = sd(NVtxs))
df_nlabel
The above table displays the number of participants included in the following analyses for each ROI. (VWFA is only found on the left hemisphere.)
# load data file from functional scans for univariate analysis
df_uni_E1 <- read_csv(file.path("data", "faceword_E1_Uni_HJ.csv"))
head(df_uni_E1)
# Clean the univariate data: drop missing responses, split Condition into the
# two IVs (FaceWord, Layout), infer Hemisphere from the label name, attach the
# label metadata, and apply the label-size criterion.
df_clean_uni_E1 <- {
df_uni_E1 %>%
# NOTE(review): Response is compared to the string "NaN"; this relies on
# coercion if Response is numeric -- confirm the column type in the CSV.
filter(Response != "NaN") %>%
separate(Condition, c("FaceWord", "Layout"), "_") %>% # separate the conditions into two IVs
mutate(FaceWord = gsub("face", "faces", FaceWord),
FaceWord = gsub("word", "words", FaceWord),
Layout = factor(Layout, levels = layout_order), # convert the two IVs to factors
# hemisphere inferred from "lh"/"rh" substring of the label name
Hemisphere = if_else(grepl("lh", Label), "left", if_else(grepl("rh", Label), "right", "NA"))) %>%
select(Hemisphere, Label, SessCode, FaceWord, Layout, Response) %>%
mutate(Subject = str_replace(SessCode, "\\_.*", "")) %>%
left_join(df_label, by = c("Label", "Subject")) %>%
filter(Size > nVtx_size_min)
}
head(df_clean_uni_E1)
# decoding (MVPA) results for Experiment 1 (no normalization / demeaning)
df_decode_E1 <- read_csv(file.path("data", "faceword_E1_Decode_noz.csv"))
head(df_decode_E1)
# Clean the decoding data: add Hemisphere and Subject, attach the label
# metadata, and apply the label-size criterion.
df_clean_decode_E1 <- df_decode_E1 %>%
select(Label, SessCode, ClassifyPair, ACC) %>%
mutate(Hemisphere = if_else(grepl("lh", Label), "left",
if_else(grepl("rh", Label), "right", "NA")),
Subject = str_remove(SessCode, "\\_.*")) %>%
left_join(df_label, by = c("Label", "Subject")) %>%
filter(Size > nVtx_size_min)
# mean decoding accuracy per hemisphere, label, session, and condition pair
df_decode_acc_E1 <- df_clean_decode_E1 %>%
group_by(Hemisphere, Label, SessCode, ClassifyPair) %>% # divide the data into groups by these columns
summarize(Accuracy = mean(ACC), Count = n()) %>%
ungroup()
df_decode_acc_E1
# similarity results: top+bottom combinations classified as intact vs. exchange
df_simi <- read_csv(file.path("data", "faceword_E1_Similarity_noz.csv"))
head(df_simi)
# Clean the similarity data and apply the label-size criterion.
df_clean_simi_E1 <- df_simi %>%
mutate(asExchange = if_else(grepl("exchange", PredictCond), 1, 0), # binary prediction
pExchange = Probability_2, # probability prediction
Subject = str_remove(SessCode, "\\_.*")) %>%
left_join(df_label, by = c("Label", "Subject")) %>%
filter(Size > nVtx_size_min)
# rate of being classified as "exchange", averaged within each session,
# label, classifier pair, and weight combination
df_rate_simi_E1 <- df_clean_simi_E1 %>%
group_by(SessCode, Label, ClassPair_1, Combination) %>%
summarize(binaryAsExchange = mean(asExchange),
pAsExchange = mean(pExchange),
RateAsExchange = pAsExchange) %>% # use the probability instead of the categorical prediction
ungroup() %>%
mutate(Hemisphere = if_else(grepl("lh", Label), 'left', if_else(grepl("rh", Label), "right", "NA")))
head(df_rate_simi_E1)
# only keep data for these two labels
df_uni_E1_FFA1 <- filter(df_clean_uni_E1, Label %in% label_FFA1)
df_decode_E1_FFA1 <- filter(df_decode_acc_E1, Label %in% label_FFA1)
df_simi_E1_FFA1 <- filter(df_rate_simi_E1, Label %in% label_FFA1)
# number of sessions available per hemisphere/label for FFA1
df_uni_E1_FFA1 %>%
select(Hemisphere, Label, SessCode) %>%
distinct() %>%
group_by(Hemisphere, Label) %>%
summarize(Count = n())
# 2 (FaceWord) x 4 (Layout) repeated-measures ANOVA, left FFA1 (afex::aov_4)
anova_E1_lFFA1 <- aov_4(Response ~ FaceWord * Layout + (FaceWord * Layout | SubjCode),
data = filter(df_uni_E1_FFA1, Label == label_FFA1[[1]]))
anova_E1_lFFA1
## Anova Table (Type 3 tests)
##
## Response: Response
## Effect df MSE F ges p.value
## 1 FaceWord 1, 11 0.26 1.57 .02 .24
## 2 Layout 1.81, 19.96 0.04 8.00 ** .04 .003
## 3 FaceWord:Layout 2.32, 25.57 0.03 3.67 * .02 .03
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '+' 0.1 ' ' 1
##
## Sphericity correction method: GG
# estimated marginal means for all FaceWord x Layout cells (left FFA1)
emm_aov_E1_lFFA1 <- emmeans(anova_E1_lFFA1, ~ FaceWord * Layout)
emm_aov_E1_lFFA1 %>%
as.data.frame() %>%
arrange(FaceWord)
Posthoc analysis for the main effects:
# pairwise comparison for the FaceWord main effect (left FFA1)
contrast(emmeans(emm_aov_E1_lFFA1, ~ FaceWord), "pairwise")
## contrast estimate SE df t.ratio p.value
## faces - words 0.13 0.104 11 1.252 0.2366
##
## Results are averaged over the levels of: Layout
# pairwise comparisons for the Layout main effect (Tukey-adjusted by default)
contrast(emmeans(emm_aov_E1_lFFA1, ~ Layout), "pairwise") # , adjust = "none"
## contrast estimate SE df t.ratio p.value
## intact - exchange 0.0582 0.0467 33 1.247 0.6020
## intact - top 0.2079 0.0467 33 4.454 0.0005
## intact - bottom 0.1536 0.0467 33 3.290 0.0122
## exchange - top 0.1497 0.0467 33 3.207 0.0150
## exchange - bottom 0.0953 0.0467 33 2.042 0.1935
## top - bottom -0.0544 0.0467 33 -1.164 0.6530
##
## Results are averaged over the levels of: FaceWord
## P value adjustment: tukey method for comparing a family of 4 estimates
Results of simple effect analysis (uncorrected):
# uncorrected simple-effect contrasts within each Layout and each FaceWord level
contr_aov_E1_lFFA1 <- contrast(emm_aov_E1_lFFA1, "pairwise", simple = "each", combine = TRUE, adjust = "none")
# contrast(emm_uni_anova_E1, interaction = "pairwise") # , adjust = "none"
contr_aov_E1_lFFA1
## Layout FaceWord contrast estimate SE df t.ratio p.value
## intact . faces - words 0.2186 0.1165 17.0 1.877 0.0778
## exchange . faces - words 0.0283 0.1165 17.0 0.243 0.8107
## top . faces - words 0.2428 0.1165 17.0 2.084 0.0525
## bottom . faces - words 0.0304 0.1165 17.0 0.261 0.7974
## . faces intact - exchange 0.1534 0.0635 65.6 2.415 0.0185
## . faces intact - top 0.1958 0.0635 65.6 3.084 0.0030
## . faces intact - bottom 0.2477 0.0635 65.6 3.900 0.0002
## . faces exchange - top 0.0425 0.0635 65.6 0.669 0.5061
## . faces exchange - bottom 0.0943 0.0635 65.6 1.485 0.1423
## . faces top - bottom 0.0519 0.0635 65.6 0.817 0.4171
## . words intact - exchange -0.0369 0.0635 65.6 -0.581 0.5630
## . words intact - top 0.2200 0.0635 65.6 3.464 0.0009
## . words intact - bottom 0.0594 0.0635 65.6 0.936 0.3528
## . words exchange - top 0.2569 0.0635 65.6 4.046 0.0001
## . words exchange - bottom 0.0963 0.0635 65.6 1.517 0.1340
## . words top - bottom -0.1606 0.0635 65.6 -2.528 0.0139
2(face vs. word) \(\times\) 2(intact vs. exchange) ANOVA
# 2 (FaceWord) x 2 (intact vs. exchange) ANOVA, left FFA1
anova_E1_lFFA1_ie <- aov_4(Response ~ FaceWord * Layout + (FaceWord * Layout | SubjCode),
data = filter(df_uni_E1_FFA1,
Label == label_FFA1[[1]],
Layout %in% c("intact", "exchange")))
anova(anova_E1_lFFA1_ie, "pes")
emm_E1_lFFA1_ie <- emmeans(anova_E1_lFFA1_ie, ~ FaceWord + Layout)
# uncorrected simple effects within the 2 x 2 design
(simple_E1_lFFA1_ie <- pairs(emm_E1_lFFA1_ie, simple = "each", combine = TRUE, adjust = "none"))
## Layout FaceWord contrast estimate SE df t.ratio p.value
## intact . faces - words 0.2186 0.1193 14.4 1.833 0.0876
## exchange . faces - words 0.0283 0.1193 14.4 0.238 0.8156
## . faces intact - exchange 0.1534 0.0617 22.0 2.485 0.0210
## . words intact - exchange -0.0369 0.0617 22.0 -0.598 0.5557
# the 2 x 2 interaction contrast
contrast(emm_E1_lFFA1_ie, interaction = "pairwise")
## FaceWord_pairwise Layout_pairwise estimate SE df t.ratio p.value
## faces - words intact - exchange 0.19 0.0887 11 2.145 0.0552
2(face vs. word) \(\times\) 2(top vs. bottom) ANOVA
# 2 (FaceWord) x 2 (top vs. bottom) ANOVA, left FFA1
anova_E1_lFFA1_tb <- aov_4(Response ~ FaceWord * Layout + (FaceWord * Layout | SubjCode),
data = filter(df_uni_E1_FFA1,
Label == label_FFA1[[1]],
Layout %in% c("top", "bottom")))
anova(anova_E1_lFFA1_tb, "pes")
emm_E1_lFFA1_tb <- emmeans(anova_E1_lFFA1_tb, ~ FaceWord + Layout)
# uncorrected simple effects within the 2 x 2 design
(simple_E1_lFFA1_tb <- pairs(emm_E1_lFFA1_tb, simple = "each", combine = TRUE, adjust = "none"))
## Layout FaceWord contrast estimate SE df t.ratio p.value
## top . faces - words 0.2428 0.1136 13.6 2.137 0.0513
## bottom . faces - words 0.0304 0.1136 13.6 0.267 0.7932
## . faces top - bottom 0.0519 0.0547 21.9 0.948 0.3534
## . words top - bottom -0.1606 0.0547 21.9 -2.936 0.0077
# the 2 x 2 interaction contrast
contrast(emm_E1_lFFA1_tb, interaction = "pairwise")
## FaceWord_pairwise Layout_pairwise estimate SE df t.ratio p.value
## faces - words top - bottom 0.212 0.0746 11 2.848 0.0158
# 2 (FaceWord) x 4 (Layout) repeated-measures ANOVA, right FFA1 (afex::aov_4)
anova_E1_rFFA1 <- aov_4(Response ~ FaceWord * Layout + (FaceWord * Layout | SubjCode),
data = filter(df_uni_E1_FFA1, Label == label_FFA1[[2]]))
anova_E1_rFFA1
## Anova Table (Type 3 tests)
##
## Response: Response
## Effect df MSE F ges p.value
## 1 FaceWord 1, 16 0.36 16.57 *** .14 .0009
## 2 Layout 2.44, 38.99 0.05 4.90 ** .02 .009
## 3 FaceWord:Layout 2.28, 36.41 0.06 2.55 + .009 .09
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '+' 0.1 ' ' 1
##
## Sphericity correction method: GG
# estimated marginal means for all FaceWord x Layout cells (right FFA1)
emm_aov_E1_rFFA1 <- emmeans(anova_E1_rFFA1, ~ FaceWord * Layout)
emm_aov_E1_rFFA1 %>%
as.data.frame() %>%
arrange(FaceWord)
Posthoc analysis for the main effects:
# pairwise comparison for the FaceWord main effect (right FFA1)
contrast(emmeans(emm_aov_E1_rFFA1, ~ FaceWord), "pairwise")
## contrast estimate SE df t.ratio p.value
## faces - words 0.419 0.103 16 4.071 0.0009
##
## Results are averaged over the levels of: Layout
# pairwise comparisons for the Layout main effect (Tukey-adjusted by default)
contrast(emmeans(emm_aov_E1_rFFA1, ~ Layout), "pairwise") # , adjust = "none"
## contrast estimate SE df t.ratio p.value
## intact - exchange 0.15479 0.0496 48 3.123 0.0155
## intact - top 0.16431 0.0496 48 3.315 0.0092
## intact - bottom 0.14352 0.0496 48 2.895 0.0281
## exchange - top 0.00952 0.0496 48 0.192 0.9975
## exchange - bottom -0.01127 0.0496 48 -0.227 0.9958
## top - bottom -0.02079 0.0496 48 -0.419 0.9749
##
## Results are averaged over the levels of: FaceWord
## P value adjustment: tukey method for comparing a family of 4 estimates
Results of simple effect analysis (uncorrected):
# uncorrected simple-effect contrasts within each Layout and each FaceWord level
contr_aov_E1_rFFA1 <- contrast(emm_aov_E1_rFFA1, "pairwise", simple = "each", combine = TRUE, adjust = "none")
# contrast(emm_uni_anova_E1, interaction = "pairwise") # , adjust = "none"
contr_aov_E1_rFFA1
## Layout FaceWord contrast estimate SE df t.ratio p.value
## intact . faces - words 0.5871 0.1208 28.9 4.858 <.0001
## exchange . faces - words 0.3178 0.1208 28.9 2.630 0.0135
## top . faces - words 0.3904 0.1208 28.9 3.230 0.0031
## bottom . faces - words 0.3822 0.1208 28.9 3.163 0.0037
## . faces intact - exchange 0.2894 0.0715 95.8 4.046 0.0001
## . faces intact - top 0.2627 0.0715 95.8 3.672 0.0004
## . faces intact - bottom 0.2459 0.0715 95.8 3.438 0.0009
## . faces exchange - top -0.0268 0.0715 95.8 -0.374 0.7091
## . faces exchange - bottom -0.0435 0.0715 95.8 -0.608 0.5448
## . faces top - bottom -0.0167 0.0715 95.8 -0.234 0.8158
## . words intact - exchange 0.0202 0.0715 95.8 0.282 0.7786
## . words intact - top 0.0660 0.0715 95.8 0.922 0.3587
## . words intact - bottom 0.0411 0.0715 95.8 0.574 0.5670
## . words exchange - top 0.0458 0.0715 95.8 0.640 0.5235
## . words exchange - bottom 0.0209 0.0715 95.8 0.293 0.7705
## . words top - bottom -0.0249 0.0715 95.8 -0.348 0.7288
# 2 (FaceWord) x 2 (intact vs. exchange) ANOVA, right FFA1
anova_E1_rFFA1_ie <- aov_4(Response ~ FaceWord * Layout + (FaceWord * Layout | SubjCode),
data = filter(df_uni_E1_FFA1,
Label == label_FFA1[[2]],
Layout %in% c("intact", "exchange")))
anova(anova_E1_rFFA1_ie, "pes")
emm_E1_rFFA1_ie <- emmeans(anova_E1_rFFA1_ie, ~ FaceWord + Layout)
# uncorrected simple effects within the 2 x 2 design
(simple_E1_rFFA1_ie <- pairs(emm_E1_rFFA1_ie, simple = "each", combine = TRUE, adjust = "none"))
## Layout FaceWord contrast estimate SE df t.ratio p.value
## intact . faces - words 0.5871 0.1083 26.9 5.420 <.0001
## exchange . faces - words 0.3178 0.1083 26.9 2.934 0.0068
## . faces intact - exchange 0.2894 0.0719 29.7 4.025 0.0004
## . words intact - exchange 0.0202 0.0719 29.7 0.280 0.7811
# the 2 x 2 interaction contrast
contrast(emm_E1_rFFA1_ie, interaction = "pairwise")
## FaceWord_pairwise Layout_pairwise estimate SE df t.ratio p.value
## faces - words intact - exchange 0.269 0.115 16 2.344 0.0323
2(face vs. word) \(\times\) 2(top vs. bottom) ANOVA
# 2 (FaceWord) x 2 (top vs. bottom) ANOVA, right FFA1
anova_E1_rFFA1_tb <- aov_4(Response ~ FaceWord * Layout + (FaceWord * Layout | SubjCode),
data = filter(df_uni_E1_FFA1,
Label == label_FFA1[[2]],
Layout %in% c("top", "bottom")))
anova(anova_E1_rFFA1_tb, "pes")
emm_E1_rFFA1_tb <- emmeans(anova_E1_rFFA1_tb, ~ FaceWord + Layout)
# uncorrected simple effects within the 2 x 2 design
(simple_E1_rFFA1_tb <- pairs(emm_E1_rFFA1_tb, simple = "each", combine = TRUE, adjust = "none"))
## Layout FaceWord contrast estimate SE df t.ratio p.value
## top . faces - words 0.3904 0.1322 21.2 2.953 0.0075
## bottom . faces - words 0.3822 0.1322 21.2 2.891 0.0087
## . faces top - bottom -0.0167 0.0707 32.0 -0.236 0.8146
## . words top - bottom -0.0249 0.0707 32.0 -0.352 0.7272
# the 2 x 2 interaction contrast
contrast(emm_E1_rFFA1_tb, interaction = "pairwise")
## FaceWord_pairwise Layout_pairwise estimate SE df t.ratio p.value
## faces - words top - bottom 0.00817 0.1 16 0.082 0.9359
# Stack the left- and right-hemisphere estimated marginal means, tag each row
# with its hemisphere, and plot the FFA1 univariate results.
nRow_E1 <- nrow(as.data.frame(emm_aov_E1_lFFA1))
Hemisphere <- rep(c("left", "right"), each = nRow_E1)
desp_uni_E1_FFA1 <- cbind(Hemisphere,
                          rbind(as.data.frame(emm_aov_E1_lFFA1),
                                as.data.frame(emm_aov_E1_rFFA1)))
plot_uni_E1_FFA1 <- plot_uni(desp_uni_E1_FFA1, contr_aov_E1_lFFA1, contr_aov_E1_rFFA1, "FFA1")
# ggsave('plot_uni_E1_FFA1.png', plot_uni_E1_FFA1, width = 10, height = 10)
plot_uni_E1_FFA1
The above figure shows the neural responses (beta values) in FFA1 for each condition. The numbers are the p-values for the tests of differences between intact vs. exchange in that condition. Error bars represent 95% confidence intervals. Note: “*p<0.1;**p<0.05;***p<0.01”
# add the column of Hemisphere and plot the FFA1 intact-vs-exchange emmeans
# (FALSE is spelled out instead of the reassignable shorthand F)
nRow_E1 <- nrow(as.data.frame(emm_E1_lFFA1_ie))
Hemisphere <- c(rep("left", nRow_E1), rep("right", nRow_E1))
desp_uni_E1_FFA1_ie <- cbind(Hemisphere, rbind(as.data.frame(emm_E1_lFFA1_ie), as.data.frame(emm_E1_rFFA1_ie)))
plot_uni_E1_FFA1_ie <- plot_uni(desp_uni_E1_FFA1_ie, simple_E1_lFFA1_ie, simple_E1_rFFA1_ie, "FFA1", FALSE)
# ggsave('plot_uni_E1_FFA1_ie.png', plot_uni_E1_FFA1_ie, width = 10, height = 5)
plot_uni_E1_FFA1_ie
# add the column of Hemisphere and plot the FFA1 top-vs-bottom emmeans
# (FALSE is spelled out instead of the reassignable shorthand F)
nRow_E1 <- nrow(as.data.frame(emm_E1_lFFA1_tb))
Hemisphere <- c(rep("left", nRow_E1), rep("right", nRow_E1))
desp_uni_E1_FFA1_tb <- cbind(Hemisphere, rbind(as.data.frame(emm_E1_lFFA1_tb), as.data.frame(emm_E1_rFFA1_tb)))
plot_uni_E1_FFA1_tb <- plot_uni(desp_uni_E1_FFA1_tb, simple_E1_lFFA1_tb, simple_E1_rFFA1_tb, "FFA1", FALSE)
# ggsave('plot_uni_E1_FFA1_tb.png', plot_uni_E1_FFA1_tb, width = 10, height = 5)
plot_uni_E1_FFA1_tb
# one-sample for results of decode E1 FFA1
# One-tailed one-sample t-tests of decoding accuracy against chance (0.5),
# per Hemisphere x ClassifyPair cell. The t.test() is now computed once per
# group and its components are extracted by name instead of by fragile
# positional index. NOTE(review): the old [[7]] (labeled "SD") is in fact
# the standard error of the mean; the column name is kept for backward
# compatibility with plot_decode().
one_decode_agg_E1_FFA1 <- {
df_decode_E1_FFA1 %>%
mutate(ClassifyPair = fct_relevel(ClassifyPair, pair_order_E1)) %>%
group_by(Hemisphere, ClassifyPair) %>%
summarize(tt = list(t.test(Accuracy, mu = 0.5, alternative = "greater")),
mean = tt[[1]]$estimate,
SD = tt[[1]]$stderr,
t = tt[[1]]$statistic,
df = tt[[1]]$parameter,
p = round(tt[[1]]$p.value, 5),
lower.CL = tt[[1]]$conf.int[1],
# upper bound mirrored around the mean (the one-tailed CI upper limit is Inf)
upper.CL = mean * 2 - lower.CL,
nullValue = tt[[1]]$null.value,
alternative = tt[[1]]$alternative
) %>%
select(-tt)
}
one_decode_agg_E1_FFA1
plot_decode_E1_FFA1 <- plot_decode(one_decode_agg_E1_FFA1, "FFA1")
# ggsave('plot_decode_E1_FFA1.png', plot_decode_E1_FFA1, width = 6.5, height = 16)
plot_decode_E1_FFA1
The above figure shows the decoding accuracy in FFA1 for each pair. The numbers are the p-values for the one-tail one-sample t-tests against the chance level (0.5) in that condition. Error bars represent 95% confidence intervals. Note: “*p<0.1;**p<0.05;***p<0.01”
# Two-tailed one-sample t-tests of the exchange-classification rate against
# 0.5, per Hemisphere x Combination cell (FFA1). The t.test() is computed
# once per group and its components are extracted by name instead of by
# positional index. NOTE(review): the "SD" column is in fact the standard
# error of the mean; the name is kept for compatibility with plot_simi().
one_simi_E1_FFA1 <- {
df_simi_E1_FFA1 %>%
group_by(Hemisphere, Combination) %>%
summarize(tt = list(t.test(RateAsExchange, mu = 0.5)),
mean = tt[[1]]$estimate,
SD = tt[[1]]$stderr,
t = tt[[1]]$statistic,
df = tt[[1]]$parameter,
p = round(tt[[1]]$p.value, 5),
lower.CL = tt[[1]]$conf.int[1],
upper.CL = tt[[1]]$conf.int[2],
nullValue = tt[[1]]$null.value,
alternative = tt[[1]]$alternative
) %>%
select(-tt)
}
one_simi_E1_FFA1
plot_simi_E1_FFA1 <- plot_simi(one_simi_E1_FFA1, "FFA1")
# ggsave('plot_simi_E1_FFA1.png', plot_simi_E1_FFA1, width = 8, height = 10)
plot_simi_E1_FFA1
The above figure shows the probability of top+bottom being decoded as exchange conditions in FFA1. Patterns of top and bottom were combined with different weights, i.e., “face_top0.25-face_bottom0.75” denotes the linear combinations of face_top and face_bottom with the weights of 0.25/0.75. The numbers are the p-values for the two-tail one-sample t-tests against the chance level (0.5) in that condition. Error bars represent 95% confidence intervals.
# only keep data for these two labels
df_uni_E1_FFA2 <- filter(df_clean_uni_E1, Label %in% label_FFA2)
df_decode_E1_FFA2 <- filter(df_decode_acc_E1, Label %in% label_FFA2)
df_simi_E1_FFA2 <- filter(df_rate_simi_E1, Label %in% label_FFA2)
# number of sessions available per hemisphere/label for FFA2
df_uni_E1_FFA2 %>%
select(Hemisphere, Label, SessCode) %>%
distinct() %>%
group_by(Hemisphere, Label) %>%
summarize(Count = n())
# 2 (FaceWord) x 4 (Layout) repeated-measures ANOVA, left FFA2 (afex::aov_4)
anova_E1_lFFA2 <- aov_4(Response ~ FaceWord * Layout + (FaceWord * Layout | SubjCode),
data = filter(df_uni_E1_FFA2, Label == label_FFA2[[1]]))
anova_E1_lFFA2
## Anova Table (Type 3 tests)
##
## Response: Response
## Effect df MSE F ges p.value
## 1 FaceWord 1, 11 0.07 2.38 .01 .15
## 2 Layout 2.44, 26.83 0.02 7.23 ** .02 .002
## 3 FaceWord:Layout 2.37, 26.03 0.04 0.22 .001 .84
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '+' 0.1 ' ' 1
##
## Sphericity correction method: GG
# estimated marginal means for all FaceWord x Layout cells (left FFA2)
emm_aov_E1_lFFA2 <- emmeans(anova_E1_lFFA2, ~ FaceWord * Layout)
emm_aov_E1_lFFA2 %>%
as.data.frame() %>%
arrange(FaceWord)
Posthoc analysis for the main effects:
# pairwise comparison for the FaceWord main effect (left FFA2)
contrast(emmeans(emm_aov_E1_lFFA2, ~ FaceWord), "pairwise")
## contrast estimate SE df t.ratio p.value
## faces - words 0.0848 0.055 11 1.543 0.1510
##
## Results are averaged over the levels of: Layout
# pairwise comparisons for the Layout main effect (Tukey-adjusted by default)
contrast(emmeans(emm_aov_E1_lFFA2, ~ Layout), "pairwise") # , adjust = "none"
## contrast estimate SE df t.ratio p.value
## intact - exchange 0.09005 0.038 33 2.370 0.1030
## intact - top 0.17686 0.038 33 4.654 0.0003
## intact - bottom 0.08279 0.038 33 2.179 0.1503
## exchange - top 0.08681 0.038 33 2.285 0.1223
## exchange - bottom -0.00726 0.038 33 -0.191 0.9975
## top - bottom -0.09408 0.038 33 -2.476 0.0826
##
## Results are averaged over the levels of: FaceWord
## P value adjustment: tukey method for comparing a family of 4 estimates
Results of simple effect analysis (uncorrected):
# uncorrected simple-effect contrasts within each Layout and each FaceWord level
contr_aov_E1_lFFA2 <- contrast(emm_aov_E1_lFFA2, "pairwise", simple = "each", combine = TRUE, adjust = "none")
# contrast(emm_uni_anova_E1, interaction = "pairwise") # , adjust = "none"
contr_aov_E1_lFFA2
## Layout FaceWord contrast estimate SE df t.ratio p.value
## intact . faces - words 0.130923 0.0822 36.4 1.593 0.1197
## exchange . faces - words 0.070611 0.0822 36.4 0.859 0.3958
## top . faces - words 0.053261 0.0822 36.4 0.648 0.5209
## bottom . faces - words 0.084486 0.0822 36.4 1.028 0.3106
## . faces intact - exchange 0.120206 0.0627 61.7 1.917 0.0599
## . faces intact - top 0.215695 0.0627 61.7 3.440 0.0010
## . faces intact - bottom 0.106005 0.0627 61.7 1.691 0.0960
## . faces exchange - top 0.095489 0.0627 61.7 1.523 0.1329
## . faces exchange - bottom -0.014201 0.0627 61.7 -0.226 0.8216
## . faces top - bottom -0.109690 0.0627 61.7 -1.749 0.0852
## . words intact - exchange 0.059894 0.0627 61.7 0.955 0.3432
## . words intact - top 0.138033 0.0627 61.7 2.201 0.0315
## . words intact - bottom 0.059568 0.0627 61.7 0.950 0.3458
## . words exchange - top 0.078139 0.0627 61.7 1.246 0.2174
## . words exchange - bottom -0.000326 0.0627 61.7 -0.005 0.9959
## . words top - bottom -0.078465 0.0627 61.7 -1.251 0.2155
2(face vs. word) \(\times\) 2(intact vs. exchange) ANOVA
# 2 (FaceWord) x 2 (intact vs. exchange) ANOVA, left FFA2
anova_E1_lFFA2_ie <- aov_4(Response ~ FaceWord * Layout + (FaceWord * Layout | SubjCode),
data = filter(df_uni_E1_FFA2,
Label == label_FFA2[[1]],
Layout %in% c("intact", "exchange")))
anova(anova_E1_lFFA2_ie, "pes")
emm_E1_lFFA2_ie <- emmeans(anova_E1_lFFA2_ie, ~ FaceWord + Layout)
# uncorrected simple effects within the 2 x 2 design
(simple_E1_lFFA2_ie <- pairs(emm_E1_lFFA2_ie, simple = "each", combine = TRUE, adjust = "none"))
## Layout FaceWord contrast estimate SE df t.ratio p.value
## intact . faces - words 0.1309 0.0869 17.6 1.507 0.1497
## exchange . faces - words 0.0706 0.0869 17.6 0.813 0.4274
## . faces intact - exchange 0.1202 0.0633 21.9 1.900 0.0706
## . words intact - exchange 0.0599 0.0633 21.9 0.947 0.3540
# the 2 x 2 interaction contrast
contrast(emm_E1_lFFA2_ie, interaction = "pairwise")
## FaceWord_pairwise Layout_pairwise estimate SE df t.ratio p.value
## faces - words intact - exchange 0.0603 0.0867 11 0.696 0.5010
2(face vs. word) \(\times\) 2(top vs. bottom) ANOVA
# 2 (FaceWord) x 2 (top vs. bottom) ANOVA, left FFA2
anova_E1_lFFA2_tb <- aov_4(Response ~ FaceWord * Layout + (FaceWord * Layout | SubjCode),
data = filter(df_uni_E1_FFA2,
Label == label_FFA2[[1]],
Layout %in% c("top", "bottom")))
anova(anova_E1_lFFA2_tb, "pes")
emm_E1_lFFA2_tb <- emmeans(anova_E1_lFFA2_tb, ~ FaceWord + Layout)
# uncorrected simple effects within the 2 x 2 design
(simple_E1_lFFA2_tb <- pairs(emm_E1_lFFA2_tb, simple = "each", combine = TRUE, adjust = "none"))
## Layout FaceWord contrast estimate SE df t.ratio p.value
## top . faces - words 0.0533 0.0771 21.8 0.690 0.4972
## bottom . faces - words 0.0845 0.0771 21.8 1.095 0.2854
## . faces top - bottom -0.1097 0.0574 15.5 -1.911 0.0746
## . words top - bottom -0.0785 0.0574 15.5 -1.367 0.1911
# the 2 x 2 interaction contrast
contrast(emm_E1_lFFA2_tb, interaction = "pairwise")
## FaceWord_pairwise Layout_pairwise estimate SE df t.ratio p.value
## faces - words top - bottom -0.0312 0.104 11 -0.300 0.7700
# 2 (FaceWord) x 4 (Layout) repeated-measures ANOVA, right FFA2 (afex::aov_4)
anova_E1_rFFA2 <- aov_4(Response ~ FaceWord * Layout + (FaceWord * Layout | SubjCode),
data = filter(df_uni_E1_FFA2, Label == label_FFA2[[2]]))
anova_E1_rFFA2
## Anova Table (Type 3 tests)
##
## Response: Response
## Effect df MSE F ges p.value
## 1 FaceWord 1, 12 0.19 9.67 ** .10 .009
## 2 Layout 2.49, 29.93 0.03 9.97 *** .04 .0002
## 3 FaceWord:Layout 2.25, 26.96 0.03 3.58 * .02 .04
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '+' 0.1 ' ' 1
##
## Sphericity correction method: GG
# estimated marginal means for all FaceWord x Layout cells (right FFA2)
emm_aov_E1_rFFA2 <- emmeans(anova_E1_rFFA2, ~ FaceWord * Layout)
emm_aov_E1_rFFA2 %>%
as.data.frame() %>%
arrange(FaceWord)
Posthoc analysis for the main effects:
# pairwise comparison for the FaceWord main effect (right FFA2)
contrast(emmeans(emm_aov_E1_rFFA2, ~ FaceWord), "pairwise")
## contrast estimate SE df t.ratio p.value
## faces - words 0.269 0.0866 12 3.110 0.0090
##
## Results are averaged over the levels of: Layout
# pairwise comparisons for the Layout main effect (Tukey-adjusted by default)
contrast(emmeans(emm_aov_E1_rFFA2, ~ Layout), "pairwise") # , adjust = "none"
## contrast estimate SE df t.ratio p.value
## intact - exchange 0.19346 0.0409 36 4.725 0.0002
## intact - top 0.19129 0.0409 36 4.672 0.0002
## intact - bottom 0.15158 0.0409 36 3.702 0.0038
## exchange - top -0.00216 0.0409 36 -0.053 0.9999
## exchange - bottom -0.04188 0.0409 36 -1.023 0.7373
## top - bottom -0.03971 0.0409 36 -0.970 0.7672
##
## Results are averaged over the levels of: FaceWord
## P value adjustment: tukey method for comparing a family of 4 estimates
Results of simple effect analysis (uncorrected):
# uncorrected simple-effect contrasts within each Layout and each FaceWord level
contr_aov_E1_rFFA2 <- contrast(emm_aov_E1_rFFA2, "pairwise", simple = "each", combine = TRUE, adjust = "none")
# contrast(emm_uni_anova_E1, interaction = "pairwise") # , adjust = "none"
contr_aov_E1_rFFA2
## Layout FaceWord contrast estimate SE df t.ratio p.value
## intact . faces - words 0.43226 0.1015 21.6 4.260 0.0003
## exchange . faces - words 0.16056 0.1015 21.6 1.582 0.1281
## top . faces - words 0.25139 0.1015 21.6 2.478 0.0215
## bottom . faces - words 0.23295 0.1015 21.6 2.296 0.0318
## . faces intact - exchange 0.32931 0.0595 71.8 5.533 <.0001
## . faces intact - top 0.28173 0.0595 71.8 4.734 <.0001
## . faces intact - bottom 0.25124 0.0595 71.8 4.221 0.0001
## . faces exchange - top -0.04758 0.0595 71.8 -0.799 0.4267
## . faces exchange - bottom -0.07807 0.0595 71.8 -1.312 0.1938
## . faces top - bottom -0.03049 0.0595 71.8 -0.512 0.6100
## . words intact - exchange 0.05761 0.0595 71.8 0.968 0.3363
## . words intact - top 0.10086 0.0595 71.8 1.695 0.0945
## . words intact - bottom 0.05192 0.0595 71.8 0.872 0.3859
## . words exchange - top 0.04325 0.0595 71.8 0.727 0.4698
## . words exchange - bottom -0.00569 0.0595 71.8 -0.096 0.9242
## . words top - bottom -0.04894 0.0595 71.8 -0.822 0.4136
# 2 (FaceWord) x 2 (intact vs. exchange) ANOVA, right FFA2
anova_E1_rFFA2_ie <- aov_4(Response ~ FaceWord * Layout + (FaceWord * Layout | SubjCode),
data = filter(df_uni_E1_FFA2,
Label == label_FFA2[[2]],
Layout %in% c("intact", "exchange")))
anova(anova_E1_rFFA2_ie, "pes")
emm_E1_rFFA2_ie <- emmeans(anova_E1_rFFA2_ie, ~ FaceWord + Layout)
# uncorrected simple effects within the 2 x 2 design
(simple_E1_rFFA2_ie <- pairs(emm_E1_rFFA2_ie, simple = "each", combine = TRUE, adjust = "none"))
## Layout FaceWord contrast estimate SE df t.ratio p.value
## intact . faces - words 0.4323 0.0993 15.2 4.354 0.0006
## exchange . faces - words 0.1606 0.0993 15.2 1.617 0.1264
## . faces intact - exchange 0.3293 0.0547 23.0 6.016 <.0001
## . words intact - exchange 0.0576 0.0547 23.0 1.052 0.3036
# the 2 x 2 interaction contrast
contrast(emm_E1_rFFA2_ie, interaction = "pairwise")
## FaceWord_pairwise Layout_pairwise estimate SE df t.ratio p.value
## faces - words intact - exchange 0.272 0.0687 12 3.955 0.0019
2(face vs. word) \(\times\) 2(top vs. bottom) ANOVA
# 2 (FaceWord) x 2 (top vs. bottom) ANOVA, right FFA2
anova_E1_rFFA2_tb <- aov_4(Response ~ FaceWord * Layout + (FaceWord * Layout | SubjCode),
data = filter(df_uni_E1_FFA2,
Label == label_FFA2[[2]],
Layout %in% c("top", "bottom")))
anova(anova_E1_rFFA2_tb, "pes")
emm_E1_rFFA2_tb <- emmeans(anova_E1_rFFA2_tb, ~ FaceWord + Layout)
# uncorrected simple effects within the 2 x 2 design
(simple_E1_rFFA2_tb <- pairs(emm_E1_rFFA2_tb, simple = "each", combine = TRUE, adjust = "none"))
## Layout FaceWord contrast estimate SE df t.ratio p.value
## top . faces - words 0.2514 0.1036 19.5 2.427 0.0251
## bottom . faces - words 0.2329 0.1036 19.5 2.249 0.0363
## . faces top - bottom -0.0305 0.0618 19.9 -0.493 0.6273
## . words top - bottom -0.0489 0.0618 19.9 -0.791 0.4381
# the 2 x 2 interaction contrast
contrast(emm_E1_rFFA2_tb, interaction = "pairwise")
## FaceWord_pairwise Layout_pairwise estimate SE df t.ratio p.value
## faces - words top - bottom 0.0184 0.105 12 0.175 0.8641
# Stack the left- and right-hemisphere estimated marginal means, tag each row
# with its hemisphere, and plot the FFA2 univariate results.
nRow_E1 <- nrow(as.data.frame(emm_aov_E1_lFFA2))
Hemisphere <- rep(c("left", "right"), each = nRow_E1)
desp_uni_E1_FFA2 <- cbind(Hemisphere,
                          rbind(as.data.frame(emm_aov_E1_lFFA2),
                                as.data.frame(emm_aov_E1_rFFA2)))
plot_uni_E1_FFA2 <- plot_uni(desp_uni_E1_FFA2, contr_aov_E1_lFFA2, contr_aov_E1_rFFA2, "FFA2")
# ggsave('plot_uni_E1_FFA2.png', plot_uni_E1_FFA2, width = 10, height = 10)
plot_uni_E1_FFA2
The above figure shows the neural responses (beta values) in FFA2 for each condition. The numbers are the p-values for the tests of differences between intact vs. exchange in that condition. Error bars represent 95% confidence intervals. Note: “*p<0.1;**p<0.05;***p<0.01”
# add the column of Hemisphere and plot the FFA2 intact-vs-exchange emmeans
# (FALSE is spelled out instead of the reassignable shorthand F)
nRow_E1 <- nrow(as.data.frame(emm_E1_lFFA2_ie))
Hemisphere <- c(rep("left", nRow_E1), rep("right", nRow_E1))
desp_uni_E1_FFA2_ie <- cbind(Hemisphere, rbind(as.data.frame(emm_E1_lFFA2_ie), as.data.frame(emm_E1_rFFA2_ie)))
plot_uni_E1_FFA2_ie <- plot_uni(desp_uni_E1_FFA2_ie, simple_E1_lFFA2_ie, simple_E1_rFFA2_ie, "FFA2", FALSE)
# ggsave('plot_uni_E1_FFA2_ie.png', plot_uni_E1_FFA2_ie, width = 10, height = 5)
plot_uni_E1_FFA2_ie
# add the column of Hemisphere and plot the FFA2 top-vs-bottom emmeans
# (FALSE is spelled out instead of the reassignable shorthand F)
nRow_E1 <- nrow(as.data.frame(emm_E1_lFFA2_tb))
Hemisphere <- c(rep("left", nRow_E1), rep("right", nRow_E1))
desp_uni_E1_FFA2_tb <- cbind(Hemisphere, rbind(as.data.frame(emm_E1_lFFA2_tb), as.data.frame(emm_E1_rFFA2_tb)))
plot_uni_E1_FFA2_tb <- plot_uni(desp_uni_E1_FFA2_tb, simple_E1_lFFA2_tb, simple_E1_rFFA2_tb, "FFA2", FALSE)
# ggsave('plot_uni_E1_FFA2_tb.png', plot_uni_E1_FFA2_tb, width = 10, height = 5)
plot_uni_E1_FFA2_tb
# one-sample for results of decode E1 FFA2
# One-tailed one-sample t-tests of decoding accuracy against chance (0.5),
# per Hemisphere x ClassifyPair cell. The t.test() is now computed once per
# group and its components are extracted by name instead of by fragile
# positional index. NOTE(review): the old [[7]] (labeled "SD") is in fact
# the standard error of the mean; the column name is kept for backward
# compatibility with plot_decode().
one_decode_agg_E1_FFA2 <- {
df_decode_E1_FFA2 %>%
mutate(ClassifyPair = fct_relevel(ClassifyPair, pair_order_E1)) %>%
group_by(Hemisphere, ClassifyPair) %>%
summarize(tt = list(t.test(Accuracy, mu = 0.5, alternative = "greater")),
mean = tt[[1]]$estimate,
SD = tt[[1]]$stderr,
t = tt[[1]]$statistic,
df = tt[[1]]$parameter,
p = round(tt[[1]]$p.value, 5),
lower.CL = tt[[1]]$conf.int[1],
# upper bound mirrored around the mean (the one-tailed CI upper limit is Inf)
upper.CL = mean * 2 - lower.CL,
nullValue = tt[[1]]$null.value,
alternative = tt[[1]]$alternative
) %>%
select(-tt)
}
one_decode_agg_E1_FFA2
plot_decode_E1_FFA2 <- plot_decode(one_decode_agg_E1_FFA2, "FFA2")
# ggsave('plot_decode_E1_FFA2.png', plot_decode_E1_FFA2, width = 6.5, height = 16)
plot_decode_E1_FFA2
The above figure shows the decoding accuracy in FFA2 for each pair. The numbers are the p-values for the one-tail one-sample t-tests against the chance level (0.5) in that condition. Error bars represent 95% confidence intervals. Note: “*p<0.1;**p<0.05;***p<0.01”
# Similarity of top + bottom to intact vs. exchange in FFA2: two-tailed
# one-sample t-tests of the probability that the combined top/bottom patterns
# are classified as "exchange", against 0.5. The t-test is run ONCE per group
# and its components extracted by name (original called t.test() nine times).
one_simi_E1_FFA2 <- df_simi_E1_FFA2 %>%
  group_by(Hemisphere, Combination) %>%
  summarize(tt = list(t.test(RateAsExchange, mu = 0.5)),
            mean = tt[[1]]$estimate,
            SD = tt[[1]]$stderr, # NOTE(review): standard error, not SD
            t = tt[[1]]$statistic,
            df = tt[[1]]$parameter,
            p = round(tt[[1]]$p.value, 5),
            lower.CL = tt[[1]]$conf.int[1],
            upper.CL = tt[[1]]$conf.int[2],
            nullValue = tt[[1]]$null.value,
            alternative = tt[[1]]$alternative) %>%
  select(-tt)
one_simi_E1_FFA2
# Plot the rate of top+bottom combinations classified as "exchange" in FFA2.
plot_simi_E1_FFA2 <- plot_simi(one_simi_E1_FFA2, "FFA2")
# ggsave('plot_simi_E1_FFA2.png', plot_simi_E1_FFA2, width = 8, height = 10)
plot_simi_E1_FFA2
The above figure shows the probability of top+bottom being decoded as exchange conditions in FFA2. Patterns of top and bottom were combined with different weights, i.e., “face_top0.25-face_bottom0.75” denotes the linear combinations of face_top and face_bottom with the weights of 0.25/0.75. The numbers are the p-values for the two-tail one-sample t-tests against the chance level (0.5) in that condition. Error bars represent 95% confidence intervals.
# only keep data for these two labels
df_uni_E1_VWFA <- filter(df_clean_uni_E1, Label %in% label_VWFA)
df_decode_E1_VWFA <- filter(df_decode_acc_E1, Label %in% label_VWFA)
df_simi_E1_VWFA <- filter(df_rate_simi_E1, Label %in% label_VWFA)
# subjects used for each hemisphere
# unique(as.character((df_univar_agg_E1_VWFA %>% filter(Label == label_VWFA_E1))$SubjCode))
# Count the distinct sessions (participants) per hemisphere and label.
df_uni_E1_VWFA %>%
select(Hemisphere, Label, SessCode) %>%
distinct() %>%
group_by(Hemisphere, Label) %>%
summarize(Count = n())
# 2 (FaceWord) x 4 (Layout) repeated-measures ANOVA on the VWFA responses.
# Use label_VWFA[[1]] for consistency with the 2x2 ANOVAs below (L672/L688
# style) and to avoid silent vector recycling: "==" against a multi-element
# label_VWFA would recycle, whereas "[[1]]" selects the single VWFA label.
anova_E1_VWFA <- aov_4(Response ~ FaceWord * Layout + (FaceWord * Layout | SubjCode),
                       data = filter(df_uni_E1_VWFA, Label == label_VWFA[[1]]))
anova_E1_VWFA
## Anova Table (Type 3 tests)
##
## Response: Response
## Effect df MSE F ges p.value
## 1 FaceWord 1, 17 0.21 101.99 *** .26 <.0001
## 2 Layout 2.62, 44.47 0.03 4.17 * .005 .01
## 3 FaceWord:Layout 2.64, 44.92 0.02 6.13 ** .006 .002
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '+' 0.1 ' ' 1
##
## Sphericity correction method: GG
# Estimated marginal means for every FaceWord x Layout cell in VWFA.
emm_aov_E1_VWFA <- emmeans(anova_E1_VWFA, ~ FaceWord * Layout)
emm_aov_E1_VWFA %>%
as.data.frame() %>%
arrange(FaceWord)
Posthoc analysis for the main effects:
# Pairwise contrast for the FaceWord main effect (averaged over Layout).
contrast(emmeans(emm_aov_E1_VWFA, ~ FaceWord), "pairwise")
## contrast estimate SE df t.ratio p.value
## faces - words -0.773 0.0765 17 -10.099 <.0001
##
## Results are averaged over the levels of: Layout
# Tukey-adjusted pairwise contrasts for the Layout main effect.
contrast(emmeans(emm_aov_E1_VWFA, ~ Layout), "pairwise") # , adjust = "none"
## contrast estimate SE df t.ratio p.value
## intact - exchange -0.0550 0.0375 51 -1.465 0.4655
## intact - top 0.0766 0.0375 51 2.041 0.1869
## intact - bottom 0.0174 0.0375 51 0.464 0.9665
## exchange - top 0.1316 0.0375 51 3.506 0.0051
## exchange - bottom 0.0724 0.0375 51 1.930 0.2287
## top - bottom -0.0592 0.0375 51 -1.576 0.4010
##
## Results are averaged over the levels of: FaceWord
## P value adjustment: tukey method for comparing a family of 4 estimates
Results of simple effect analysis (uncorrected):
# Simple effects: FaceWord within each Layout and Layout within each FaceWord,
# uncorrected for multiple comparisons.
contr_aov_E1_VWFA <- contrast(emm_aov_E1_VWFA, "pairwise", simple = "each", combine = TRUE, adjust = "none")
# contrast(emm_uni_aov_E1, interaction = "pairwise") # , adjust = "none"
contr_aov_E1_VWFA
## Layout FaceWord contrast estimate SE df t.ratio p.value
## intact . faces - words -0.69655 0.0866 27.2 -8.044 <.0001
## exchange . faces - words -0.90265 0.0866 27.2 -10.425 <.0001
## top . faces - words -0.65610 0.0866 27.2 -7.577 <.0001
## bottom . faces - words -0.83587 0.0866 27.2 -9.653 <.0001
## . faces intact - exchange 0.04805 0.0500 100.4 0.960 0.3392
## . faces intact - top 0.05638 0.0500 100.4 1.127 0.2625
## . faces intact - bottom 0.08708 0.0500 100.4 1.740 0.0848
## . faces exchange - top 0.00833 0.0500 100.4 0.166 0.8681
## . faces exchange - bottom 0.03904 0.0500 100.4 0.780 0.4371
## . faces top - bottom 0.03071 0.0500 100.4 0.614 0.5408
## . words intact - exchange -0.15806 0.0500 100.4 -3.159 0.0021
## . words intact - top 0.09682 0.0500 100.4 1.935 0.0558
## . words intact - bottom -0.05223 0.0500 100.4 -1.044 0.2990
## . words exchange - top 0.25488 0.0500 100.4 5.094 <.0001
## . words exchange - bottom 0.10583 0.0500 100.4 2.115 0.0369
## . words top - bottom -0.14905 0.0500 100.4 -2.979 0.0036
2(face vs. word) \(\times\) 2(intact vs. exchange) ANOVA
# 2 (FaceWord) x 2 (intact vs. exchange) ANOVA in VWFA; "pes" = partial eta squared.
anova_E1_VWFA_ie <- aov_4(Response ~ FaceWord * Layout + (FaceWord * Layout | SubjCode),
data = filter(df_uni_E1_VWFA,
Label == label_VWFA[[1]],
Layout %in% c("intact", "exchange")))
anova(anova_E1_VWFA_ie, "pes")
emm_E1_VWFA_ie <- emmeans(anova_E1_VWFA_ie, ~ FaceWord + Layout)
# Uncorrected simple effects within each Layout and each FaceWord level.
(simple_E1_VWFA_ie <- pairs(emm_E1_VWFA_ie, simple = "each", combine = TRUE, adjust = "none"))
## Layout FaceWord contrast estimate SE df t.ratio p.value
## intact . faces - words -0.697 0.0878 20.7 -7.931 <.0001
## exchange . faces - words -0.903 0.0878 20.7 -10.278 <.0001
## . faces intact - exchange 0.048 0.0390 34.0 1.232 0.2266
## . words intact - exchange -0.158 0.0390 34.0 -4.052 0.0003
# Interaction contrast (difference of differences).
contrast(emm_E1_VWFA_ie, interaction = "pairwise")
## FaceWord_pairwise Layout_pairwise estimate SE df t.ratio p.value
## faces - words intact - exchange 0.206 0.0551 17 3.743 0.0016
2(face vs. word) \(\times\) 2(top vs. bottom) ANOVA
# 2 (FaceWord) x 2 (top vs. bottom) ANOVA in VWFA; "pes" = partial eta squared.
anova_E1_VWFA_tb <- aov_4(Response ~ FaceWord * Layout + (FaceWord * Layout | SubjCode),
data = filter(df_uni_E1_VWFA,
Label == label_VWFA[[1]],
Layout %in% c("top", "bottom")))
anova(anova_E1_VWFA_tb, "pes")
emm_E1_VWFA_tb <- emmeans(anova_E1_VWFA_tb, ~ FaceWord + Layout)
# Uncorrected simple effects within each Layout and each FaceWord level.
(simple_E1_VWFA_tb <- pairs(emm_E1_VWFA_tb, simple = "each", combine = TRUE, adjust = "none"))
## Layout FaceWord contrast estimate SE df t.ratio p.value
## top . faces - words -0.6561 0.0853 23.8 -7.689 <.0001
## bottom . faces - words -0.8359 0.0853 23.8 -9.796 <.0001
## . faces top - bottom 0.0307 0.0527 33.7 0.583 0.5640
## . words top - bottom -0.1491 0.0527 33.7 -2.828 0.0078
# Interaction contrast (difference of differences).
contrast(emm_E1_VWFA_tb, interaction = "pairwise")
## FaceWord_pairwise Layout_pairwise estimate SE df t.ratio p.value
## faces - words top - bottom 0.18 0.0708 17 2.539 0.0212
# VWFA is left-hemisphere only, so tag every row as "left" before plotting.
nRow_E1 <- nrow(as.data.frame(emm_aov_E1_VWFA))
Hemisphere <- rep("left", nRow_E1)
desp_uni_E1_VWFA <- cbind(Hemisphere, as.data.frame(emm_aov_E1_VWFA))
plot_uni_E1_VWFA <- plot_uni_vwfa(desp_uni_E1_VWFA, contr_aov_E1_VWFA, "VWFA")
# ggsave('plot_uni_E1_VWFA.png', plot_uni_E1_VWFA, width = 5.5, height = 10)
plot_uni_E1_VWFA
The above figure shows the neural responses (beta values) in VWFA for each condition. The numbers are the p-values for the tests of differences between intact vs. exchange in that condition. Error bars represent 95% confidence intervals. Note: *, p < .05
# Left-hemisphere EMMs for the VWFA 2 x 2 (intact vs. exchange) ANOVA.
nRow_E1 <- nrow(as.data.frame(emm_E1_VWFA_ie))
Hemisphere <- rep("left", nRow_E1)
desp_uni_E1_VWFA_ie <- cbind(Hemisphere, as.data.frame(emm_E1_VWFA_ie))
plot_uni_E1_VWFA_ie <- plot_uni_vwfa(desp_uni_E1_VWFA_ie, simple_E1_VWFA_ie, "VWFA", FALSE)
# ggsave('plot_uni_E1_VWFA_ie.png', plot_uni_E1_VWFA_ie, width = 5.5, height = 10)
plot_uni_E1_VWFA_ie
# Left-hemisphere EMMs for the VWFA 2 x 2 (top vs. bottom) ANOVA.
nRow_E1 <- nrow(as.data.frame(emm_E1_VWFA_tb))
Hemisphere <- rep("left", nRow_E1)
desp_uni_E1_VWFA_tb <- cbind(Hemisphere, as.data.frame(emm_E1_VWFA_tb))
plot_uni_E1_VWFA_tb <- plot_uni_vwfa(desp_uni_E1_VWFA_tb, simple_E1_VWFA_tb, "VWFA", FALSE)
# ggsave('plot_uni_E1_VWFA_tb.png', plot_uni_E1_VWFA_tb, width = 5.5, height = 10)
plot_uni_E1_VWFA_tb
# One-sample t-tests (one-tailed, "greater") of decoding accuracy in VWFA
# against chance (0.5), per hemisphere and classification pair. The t-test is
# run ONCE per group and its components extracted by name (the original called
# t.test() nine times per group with positional indices).
one_decode_agg_E1_VWFA <- df_decode_E1_VWFA %>%
  mutate(ClassifyPair = fct_relevel(ClassifyPair, pair_order_E1)) %>%
  group_by(Hemisphere, ClassifyPair) %>%
  summarize(tt = list(t.test(Accuracy, mu = 0.5, alternative = "greater")),
            mean = tt[[1]]$estimate,
            SD = tt[[1]]$stderr, # NOTE(review): standard error, not SD
            t = tt[[1]]$statistic,
            df = tt[[1]]$parameter,
            p = round(tt[[1]]$p.value, 5),
            lower.CL = tt[[1]]$conf.int[1],
            # one-sided upper bound is Inf; mirror lower bound around the mean
            upper.CL = mean * 2 - lower.CL,
            nullValue = tt[[1]]$null.value,
            alternative = tt[[1]]$alternative) %>%
  select(-tt)
one_decode_agg_E1_VWFA
# Plot the decoding accuracies for VWFA (left hemisphere only).
plot_decode_E1_VWFA <- plot_decode_vwfa(one_decode_agg_E1_VWFA, "VWFA")
# ggsave('plot_decode_E1_VWFA.png', plot_decode_E1_VWFA, width = 4, height = 16)
plot_decode_E1_VWFA
The above figure shows the decoding accuracy in VWFA for each pair. The numbers are the p-values for the one-tail one-sample t-tests against the chance level (0.5) in that condition. Error bars represent 95% confidence intervals. Note: ***, p <.001
# Similarity of top + bottom to intact vs. exchange in VWFA: two-tailed
# one-sample t-tests of the probability that the combined top/bottom patterns
# are classified as "exchange", against 0.5. The t-test is run ONCE per group
# and its components extracted by name (original called t.test() nine times).
one_simi_E1_VWFA <- df_simi_E1_VWFA %>%
  group_by(Hemisphere, Combination) %>%
  summarize(tt = list(t.test(RateAsExchange, mu = 0.5)),
            mean = tt[[1]]$estimate,
            SD = tt[[1]]$stderr, # NOTE(review): standard error, not SD
            t = tt[[1]]$statistic,
            df = tt[[1]]$parameter,
            p = round(tt[[1]]$p.value, 5),
            lower.CL = tt[[1]]$conf.int[1],
            upper.CL = tt[[1]]$conf.int[2],
            nullValue = tt[[1]]$null.value,
            alternative = tt[[1]]$alternative) %>%
  select(-tt)
one_simi_E1_VWFA
# Plot the rate of top+bottom combinations classified as "exchange" in VWFA.
plot_simi_E1_VWFA <- plot_simi_vwfa(one_simi_E1_VWFA, "VWFA")
# ggsave('plot_simi_E1_VWFA.png', plot_simi_E1_VWFA, width = 4.25, height = 10)
plot_simi_E1_VWFA
The above figure shows the probability of top+bottom being decoded as exchange conditions in VWFA. Patterns of top and bottom were combined with different weights, i.e., “face_top0.25-face_bottom0.75” denotes the linear combinations of face_top and face_bottom with the weights of 0.25/0.75. The numbers are the p-values for the two-tail one-sample t-tests against the chance level (0.5) in that condition. Error bars represent 95% confidence intervals.
# only keep data for these two labels
df_uni_E1_LO <- filter(df_clean_uni_E1, Label %in% label_LO)
df_decode_E1_LO <- filter(df_decode_acc_E1, Label %in% label_LO)
df_simi_E1_LO <- filter(df_rate_simi_E1, Label %in% label_LO)
# Count the distinct sessions (participants) per hemisphere and label.
df_uni_E1_LO %>%
select(Hemisphere, Label, SessCode) %>%
distinct() %>%
group_by(Hemisphere, Label) %>%
summarize(Count = n())
# 2 (FaceWord) x 4 (Layout) ANOVA for the left-hemisphere LO label.
anova_E1_lLO <- aov_4(Response ~ FaceWord * Layout + (FaceWord * Layout | SubjCode),
data = filter(df_uni_E1_LO, Label == label_LO[[1]]))
anova_E1_lLO
## Anova Table (Type 3 tests)
##
## Response: Response
## Effect df MSE F ges p.value
## 1 FaceWord 1, 18 0.21 23.83 *** .06 .0001
## 2 Layout 2.27, 40.95 0.05 3.42 * .005 .04
## 3 FaceWord:Layout 2.45, 44.12 0.03 0.46 .0004 .67
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '+' 0.1 ' ' 1
##
## Sphericity correction method: GG
# Estimated marginal means for every FaceWord x Layout cell in left LO.
emm_aov_E1_lLO <- emmeans(anova_E1_lLO, ~ FaceWord * Layout)
emm_aov_E1_lLO %>%
as.data.frame() %>%
arrange(FaceWord)
Posthoc analysis for the main effects:
# Pairwise contrast for the FaceWord main effect (averaged over Layout).
contrast(emmeans(emm_aov_E1_lLO, ~ FaceWord), "pairwise")
## contrast estimate SE df t.ratio p.value
## faces - words -0.366 0.0749 18 -4.882 0.0001
##
## Results are averaged over the levels of: Layout
# Tukey-adjusted pairwise contrasts for the Layout main effect.
contrast(emmeans(emm_aov_E1_lLO, ~ Layout), "pairwise") # , adjust = "none"
## contrast estimate SE df t.ratio p.value
## intact - exchange 0.00901 0.0433 54 0.208 0.9968
## intact - top 0.12389 0.0433 54 2.864 0.0295
## intact - bottom 0.05298 0.0433 54 1.225 0.6141
## exchange - top 0.11488 0.0433 54 2.655 0.0495
## exchange - bottom 0.04397 0.0433 54 1.016 0.7406
## top - bottom -0.07091 0.0433 54 -1.639 0.3659
##
## Results are averaged over the levels of: FaceWord
## P value adjustment: tukey method for comparing a family of 4 estimates
Results of simple effect analysis (uncorrected):
# Uncorrected simple effects for left LO.
contr_aov_E1_lLO <- contrast(emm_aov_E1_lLO, "pairwise", simple = "each", combine = TRUE, adjust = "none")
# contrast(emm_uni_aov_E1, interaction = "pairwise") # , adjust = "none"
contr_aov_E1_lLO
## Layout FaceWord contrast estimate SE df t.ratio p.value
## intact . faces - words -0.33063 0.0873 31.8 -3.786 0.0006
## exchange . faces - words -0.34607 0.0873 31.8 -3.963 0.0004
## top . faces - words -0.37604 0.0873 31.8 -4.306 0.0001
## bottom . faces - words -0.41056 0.0873 31.8 -4.701 <.0001
## . faces intact - exchange 0.01672 0.0567 105.1 0.295 0.7685
## . faces intact - top 0.14659 0.0567 105.1 2.586 0.0111
## . faces intact - bottom 0.09294 0.0567 105.1 1.640 0.1040
## . faces exchange - top 0.12986 0.0567 105.1 2.291 0.0239
## . faces exchange - bottom 0.07622 0.0567 105.1 1.345 0.1816
## . faces top - bottom -0.05364 0.0567 105.1 -0.947 0.3461
## . words intact - exchange 0.00129 0.0567 105.1 0.023 0.9819
## . words intact - top 0.10118 0.0567 105.1 1.785 0.0771
## . words intact - bottom 0.01302 0.0567 105.1 0.230 0.8188
## . words exchange - top 0.09989 0.0567 105.1 1.763 0.0809
## . words exchange - bottom 0.01173 0.0567 105.1 0.207 0.8365
## . words top - bottom -0.08817 0.0567 105.1 -1.556 0.1228
2(face vs. word) \(\times\) 2(intact vs. exchange) ANOVA
# 2 (FaceWord) x 2 (intact vs. exchange) ANOVA for left LO; "pes" = partial eta squared.
anova_E1_lLO_ie <- aov_4(Response ~ FaceWord * Layout + (FaceWord * Layout | SubjCode),
data = filter(df_uni_E1_LO,
Label == label_LO[[1]],
Layout %in% c("intact", "exchange")))
anova(anova_E1_lLO_ie, "pes")
emm_E1_lLO_ie <- emmeans(anova_E1_lLO_ie, ~ FaceWord + Layout)
# Uncorrected simple effects within each Layout and each FaceWord level.
(simple_E1_lLO_ie <- pairs(emm_E1_lLO_ie, simple = "each", combine = TRUE, adjust = "none"))
## Layout FaceWord contrast estimate SE df t.ratio p.value
## intact . faces - words -0.33063 0.0955 21.8 -3.461 0.0022
## exchange . faces - words -0.34607 0.0955 21.8 -3.623 0.0015
## . faces intact - exchange 0.01672 0.0462 34.9 0.362 0.7196
## . words intact - exchange 0.00129 0.0462 34.9 0.028 0.9779
# Interaction contrast (difference of differences).
contrast(emm_E1_lLO_ie, interaction = "pairwise")
## FaceWord_pairwise Layout_pairwise estimate SE df t.ratio p.value
## faces - words intact - exchange 0.0154 0.0594 18 0.260 0.7978
2(face vs. word) \(\times\) 2(top vs. bottom) ANOVA
# 2 (FaceWord) x 2 (top vs. bottom) ANOVA for left LO; "pes" = partial eta squared.
anova_E1_lLO_tb <- aov_4(Response ~ FaceWord * Layout + (FaceWord * Layout | SubjCode),
data = filter(df_uni_E1_LO,
Label == label_LO[[1]],
Layout %in% c("top", "bottom")))
anova(anova_E1_lLO_tb, "pes")
emm_E1_lLO_tb <- emmeans(anova_E1_lLO_tb, ~ FaceWord + Layout)
# Uncorrected simple effects within each Layout and each FaceWord level.
(simple_E1_lLO_tb <- pairs(emm_E1_lLO_tb, simple = "each", combine = TRUE, adjust = "none"))
## Layout FaceWord contrast estimate SE df t.ratio p.value
## top . faces - words -0.3760 0.0783 24.1 -4.804 0.0001
## bottom . faces - words -0.4106 0.0783 24.1 -5.245 <.0001
## . faces top - bottom -0.0536 0.0536 31.8 -1.001 0.3246
## . words top - bottom -0.0882 0.0536 31.8 -1.644 0.1100
# Interaction contrast (difference of differences).
contrast(emm_E1_lLO_tb, interaction = "pairwise")
## FaceWord_pairwise Layout_pairwise estimate SE df t.ratio p.value
## faces - words top - bottom 0.0345 0.0606 18 0.570 0.5757
# 2 (FaceWord) x 4 (Layout) ANOVA for the right-hemisphere LO label.
# NOTE(review): named "aov_E1_rLO" while the analogous models use the "anova_"
# prefix (e.g. anova_E1_lLO) — consider renaming for consistency.
aov_E1_rLO <- aov_4(Response ~ FaceWord * Layout + (FaceWord * Layout | SubjCode),
data = filter(df_uni_E1_LO, Label == label_LO[[2]]))
aov_E1_rLO
## Anova Table (Type 3 tests)
##
## Response: Response
## Effect df MSE F ges p.value
## 1 FaceWord 1, 17 0.20 9.40 ** .02 .007
## 2 Layout 2.20, 37.35 0.07 1.39 .002 .26
## 3 FaceWord:Layout 2.76, 46.96 0.03 1.63 .001 .20
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '+' 0.1 ' ' 1
##
## Sphericity correction method: GG
# Estimated marginal means for every FaceWord x Layout cell in right LO.
emm_aov_E1_rLO <- emmeans(aov_E1_rLO, ~ FaceWord * Layout)
emm_aov_E1_rLO %>%
as.data.frame() %>%
arrange(FaceWord)
Posthoc analysis for the main effects:
# Pairwise contrast for the FaceWord main effect (averaged over Layout).
contrast(emmeans(emm_aov_E1_rLO, ~ FaceWord), "pairwise")
## contrast estimate SE df t.ratio p.value
## faces - words -0.226 0.0737 17 -3.065 0.0070
##
## Results are averaged over the levels of: Layout
# Tukey-adjusted pairwise contrasts for the Layout main effect.
contrast(emmeans(emm_aov_E1_rLO, ~ Layout), "pairwise") # , adjust = "none"
## contrast estimate SE df t.ratio p.value
## intact - exchange 0.0872 0.055 51 1.586 0.3957
## intact - top 0.1034 0.055 51 1.879 0.2497
## intact - bottom 0.0496 0.055 51 0.901 0.8041
## exchange - top 0.0162 0.055 51 0.294 0.9911
## exchange - bottom -0.0376 0.055 51 -0.684 0.9026
## top - bottom -0.0538 0.055 51 -0.978 0.7626
##
## Results are averaged over the levels of: FaceWord
## P value adjustment: tukey method for comparing a family of 4 estimates
Results of simple effect analysis (uncorrected):
# Uncorrected simple effects for right LO.
contr_aov_E1_rLO <- contrast(emm_aov_E1_rLO, "pairwise", simple = "each", combine = TRUE, adjust = "none")
# contrast(emm_uni_aov_E1, interaction = "pairwise") # , adjust = "none"
contr_aov_E1_rLO
## Layout FaceWord contrast estimate SE df t.ratio p.value
## intact . faces - words -0.139667 0.0884 33.1 -1.580 0.1236
## exchange . faces - words -0.314757 0.0884 33.1 -3.560 0.0011
## top . faces - words -0.234501 0.0884 33.1 -2.653 0.0122
## bottom . faces - words -0.214267 0.0884 33.1 -2.424 0.0210
## . faces intact - exchange 0.174783 0.0680 93.0 2.572 0.0117
## . faces intact - top 0.150806 0.0680 93.0 2.219 0.0289
## . faces intact - bottom 0.086894 0.0680 93.0 1.279 0.2042
## . faces exchange - top -0.023978 0.0680 93.0 -0.353 0.7250
## . faces exchange - bottom -0.087889 0.0680 93.0 -1.293 0.1992
## . faces top - bottom -0.063911 0.0680 93.0 -0.940 0.3495
## . words intact - exchange -0.000307 0.0680 93.0 -0.005 0.9964
## . words intact - top 0.055972 0.0680 93.0 0.824 0.4123
## . words intact - bottom 0.012294 0.0680 93.0 0.181 0.8568
## . words exchange - top 0.056278 0.0680 93.0 0.828 0.4098
## . words exchange - bottom 0.012601 0.0680 93.0 0.185 0.8533
## . words top - bottom -0.043677 0.0680 93.0 -0.643 0.5220
# 2 (FaceWord) x 2 (intact vs. exchange) ANOVA for right LO; "pes" = partial eta squared.
anova_E1_rLO_ie <- aov_4(Response ~ FaceWord * Layout + (FaceWord * Layout | SubjCode),
data = filter(df_uni_E1_LO,
Label == label_LO[[2]],
Layout %in% c("intact", "exchange")))
anova(anova_E1_rLO_ie, "pes")
emm_E1_rLO_ie <- emmeans(anova_E1_rLO_ie, ~ FaceWord + Layout)
# Uncorrected simple effects within each Layout and each FaceWord level.
(simple_E1_rLO_ie <- pairs(emm_E1_rLO_ie, simple = "each", combine = TRUE, adjust = "none"))
## Layout FaceWord contrast estimate SE df t.ratio p.value
## intact . faces - words -0.139667 0.0896 23.8 -1.559 0.1322
## exchange . faces - words -0.314757 0.0896 23.8 -3.513 0.0018
## . faces intact - exchange 0.174783 0.0587 32.8 2.978 0.0054
## . words intact - exchange -0.000307 0.0587 32.8 -0.005 0.9959
# Interaction contrast (difference of differences).
contrast(emm_E1_rLO_ie, interaction = "pairwise")
## FaceWord_pairwise Layout_pairwise estimate SE df t.ratio p.value
## faces - words intact - exchange 0.175 0.0746 17 2.348 0.0312
2(face vs. word) \(\times\) 2(top vs. bottom) ANOVA
# 2 (FaceWord) x 2 (top vs. bottom) ANOVA for right LO; "pes" = partial eta squared.
anova_E1_rLO_tb <- aov_4(Response ~ FaceWord * Layout + (FaceWord * Layout | SubjCode),
data = filter(df_uni_E1_LO,
Label == label_LO[[2]],
Layout %in% c("top", "bottom")))
anova(anova_E1_rLO_tb, "pes")
emm_E1_rLO_tb <- emmeans(anova_E1_rLO_tb, ~ FaceWord + Layout)
# Uncorrected simple effects within each Layout and each FaceWord level.
(simple_E1_rLO_tb <- pairs(emm_E1_rLO_tb, simple = "each", combine = TRUE, adjust = "none"))
## Layout FaceWord contrast estimate SE df t.ratio p.value
## top . faces - words -0.2345 0.0872 26.4 -2.689 0.0122
## bottom . faces - words -0.2143 0.0872 26.4 -2.457 0.0209
## . faces top - bottom -0.0639 0.0624 33.7 -1.024 0.3129
## . words top - bottom -0.0437 0.0624 33.7 -0.700 0.4887
# Interaction contrast (difference of differences).
contrast(emm_E1_rLO_tb, interaction = "pairwise")
## FaceWord_pairwise Layout_pairwise estimate SE df t.ratio p.value
## faces - words top - bottom -0.0202 0.0839 17 -0.241 0.8123
# Combine left/right LO estimated marginal means and tag by hemisphere.
nRow_E1 <- nrow(as.data.frame(emm_aov_E1_lLO))
Hemisphere <- rep(c("left", "right"), each = nRow_E1)
desp_uni_E1_LO <- cbind(
  Hemisphere,
  rbind(as.data.frame(emm_aov_E1_lLO), as.data.frame(emm_aov_E1_rLO))
)
plot_uni_E1_LO <- plot_uni(desp_uni_E1_LO, contr_aov_E1_lLO, contr_aov_E1_rLO, "LO")
# ggsave('plot_uni_E1_LO.png', plot_uni_E1_LO, width = 10, height = 10)
plot_uni_E1_LO
The above figure shows the neural responses (beta values) in LO for each condition. The numbers are the p-values for the tests of differences between intact vs. exchange in that condition. Error bars represent 95% confidence intervals. Note: *, p < .05
# Hemisphere-tagged EMMs for the LO 2 x 2 (intact vs. exchange) ANOVA.
nRow_E1 <- nrow(as.data.frame(emm_E1_lLO_ie))
Hemisphere <- rep(c("left", "right"), each = nRow_E1)
desp_uni_E1_LO_ie <- cbind(
  Hemisphere,
  rbind(as.data.frame(emm_E1_lLO_ie), as.data.frame(emm_E1_rLO_ie))
)
plot_uni_E1_LO_ie <- plot_uni(desp_uni_E1_LO_ie, simple_E1_lLO_ie, simple_E1_rLO_ie, "LO", FALSE)
# ggsave('plot_uni_E1_LO_ie.png', plot_uni_E1_LO_ie, width = 10, height = 5)
plot_uni_E1_LO_ie
# Hemisphere-tagged EMMs for the LO 2 x 2 (top vs. bottom) ANOVA.
nRow_E1 <- nrow(as.data.frame(emm_E1_lLO_tb))
Hemisphere <- rep(c("left", "right"), each = nRow_E1)
desp_uni_E1_LO_tb <- cbind(
  Hemisphere,
  rbind(as.data.frame(emm_E1_lLO_tb), as.data.frame(emm_E1_rLO_tb))
)
plot_uni_E1_LO_tb <- plot_uni(desp_uni_E1_LO_tb, simple_E1_lLO_tb, simple_E1_rLO_tb, "LO", FALSE)
# ggsave('plot_uni_E1_LO_tb.png', plot_uni_E1_LO_tb, width = 10, height = 5)
plot_uni_E1_LO_tb
# One-sample t-tests (one-tailed, "greater") of decoding accuracy in LO
# against chance (0.5), per hemisphere and classification pair. The t-test is
# run ONCE per group and its components extracted by name (the original called
# t.test() nine times per group with positional indices).
one_decode_agg_E1_LO <- df_decode_E1_LO %>%
  mutate(ClassifyPair = fct_relevel(ClassifyPair, pair_order_E1)) %>%
  group_by(Hemisphere, ClassifyPair) %>%
  summarize(tt = list(t.test(Accuracy, mu = 0.5, alternative = "greater")),
            mean = tt[[1]]$estimate,
            SD = tt[[1]]$stderr, # NOTE(review): standard error, not SD
            t = tt[[1]]$statistic,
            df = tt[[1]]$parameter,
            p = round(tt[[1]]$p.value, 5),
            lower.CL = tt[[1]]$conf.int[1],
            # one-sided upper bound is Inf; mirror lower bound around the mean
            upper.CL = mean * 2 - lower.CL,
            nullValue = tt[[1]]$null.value,
            alternative = tt[[1]]$alternative) %>%
  select(-tt)
one_decode_agg_E1_LO
# Plot the decoding accuracies for LO (one panel per hemisphere).
plot_decode_E1_LO <- plot_decode(one_decode_agg_E1_LO, "LO")
# ggsave('plot_decode_E1_LO.png', plot_decode_E1_LO, width = 6.5, height = 16)
plot_decode_E1_LO
The above figure shows the decoding accuracy in LO for each pair. The numbers are the p-values for the one-tail one-sample t-tests against the chance level (0.5) in that condition. Error bars represent 95% confidence intervals. Note: **, p < .01; ***, p < .001
# Similarity of top + bottom to intact vs. exchange in LO: two-tailed
# one-sample t-tests of the probability that the combined top/bottom patterns
# are classified as "exchange", against 0.5. The t-test is run ONCE per group
# and its components extracted by name (original called t.test() nine times).
one_simi_E1_LO <- df_simi_E1_LO %>%
  group_by(Hemisphere, Combination) %>%
  summarize(tt = list(t.test(RateAsExchange, mu = 0.5)),
            mean = tt[[1]]$estimate,
            SD = tt[[1]]$stderr, # NOTE(review): standard error, not SD
            t = tt[[1]]$statistic,
            df = tt[[1]]$parameter,
            p = round(tt[[1]]$p.value, 5),
            lower.CL = tt[[1]]$conf.int[1],
            upper.CL = tt[[1]]$conf.int[2],
            nullValue = tt[[1]]$null.value,
            alternative = tt[[1]]$alternative) %>%
  select(-tt)
one_simi_E1_LO
# Plot the rate of top+bottom combinations classified as "exchange" in LO.
plot_simi_E1_LO <- plot_simi(one_simi_E1_LO, "LO")
# ggsave('plot_simi_E1_LO.png', plot_simi_E1_LO, width = 8, height = 10)
plot_simi_E1_LO
The above figure shows the probability of top+bottom being decoded as exchange conditions in LO. Patterns of top and bottom were combined with different weights, i.e., “face_top0.25-face_bottom0.75” denotes the linear combinations of face_top and face_bottom with the weights of 0.25/0.75. The numbers are the p-values for the two-tail one-sample t-tests against the chance level (0.5) in that condition. Error bars represent 95% confidence intervals.
# Load label (ROI) info for Experiment 2 and derive a short ROI name from the
# label string (e.g. "roi.lh.f-vs-o.ffa1.label" -> "lh.f-vs-o.ffa1").
# The dot is escaped and the patterns anchored: the original "roi." / ".label"
# patterns treat "." as a regex wildcard and are not tied to the string ends,
# so they could strip unintended characters from other label names.
df_label_E2 <- read_csv(file.path("data", "faceword_E2_Label_HJ.csv")) %>%
  mutate(roi = str_remove(Label, "^roi\\."),
         roi = str_remove(roi, "\\.label$")) %>%
  mutate(Subject = str_replace(SubjCode, "\\_.*", "")) # drop everything after "_"
# df_label %>% head()
# Label size per participant: one row per SubjCode, one column per ROI.
df_label_E2 %>%
select(SubjCode, roi, Size) %>%
pivot_wider(names_from = roi, values_from = Size) %>%
arrange(SubjCode)
The above table displays the size (in mm²) of each label for each participant. (NA denotes that this label is not available for that participant.)
# Vertex counts per participant: one row per SubjCode, one column per ROI.
df_label_E2 %>%
select(SubjCode, roi, NVtxs) %>%
pivot_wider(names_from = roi, values_from = NVtxs) %>%
arrange(SubjCode)
The above table displays the number of vertices for each label and each participant. (NA denotes that this label is not available for that participant.)
# Descriptive statistics (count, mean/SD of size and vertex number) per label.
df_label_E2 %>%
group_by(Label, roi) %>%
summarize(Count = n(),
meanSize = mean(Size),
SDSize = sd(Size),
meanNVtx = mean(NVtxs),
SDNVtx = sd(NVtxs))
# Same descriptives after excluding labels at or below the minimum threshold;
# these participants enter the following analyses.
# NOTE(review): the threshold nVtx_size_min is compared against Size despite
# its "nVtx" name — confirm which unit the cutoff is meant to be in.
df_nlabel_E2 <- df_label_E2 %>%
filter(Size > nVtx_size_min) %>%
group_by(Label, roi) %>%
summarize(Count = n(),
meanSize = mean(Size),
SDSize = sd(Size),
meanNVtx = mean(NVtxs),
SDNVtx = sd(NVtxs))
df_nlabel_E2
The above table displays the number of participants included in the following analyses for each ROI. (VWFA is only found on the left hemisphere.)
# load data file from functional scans for univariate analysis
df_uni_E2 <- read_csv(file.path("data", "faceword_E2_Uni_HJ.csv"))
head(df_uni_E2)
# Clean the univariate data: split the condition code into the two IVs, add
# hemisphere/subject columns, and keep only labels above the size threshold.
df_clean_uni_E2 <- {
df_uni_E2 %>%
filter(Response != "NaN") %>%
separate(Condition, c("FaceWord", "Layout"), "_") %>% # separate the conditions into two IVs
mutate(Layout_ = factor(Layout, levels = layout_order), # convert the two IVs to factors
Hemisphere = if_else(grepl("lh", Label), "left", if_else(grepl("rh", Label), "right", "NA")),
Layout = fct_recode(Layout_, partA = "top", partB = "bottom")) %>% # rename top and bottom as partA and partB
select(Hemisphere, Label, SessCode, FaceWord, Layout, Response) %>%
mutate(Subject = str_replace(SessCode, "\\_.*", "")) %>%
left_join(df_label_E2, by = c("Label", "Subject")) %>%
filter(Size > nVtx_size_min)
}
head(df_clean_uni_E2)
# Ordered levels for the Experiment 2 classification pairs.
pair_order_E2 <- c("English_intact-Chinese_intact",
"English_intact-English_exchange",
"English_partA-English_partB", # English_top-English_bottom
"Chinese_intact-Chinese_exchange",
"Chinese_partA-Chinese_partB") # Chinese_top-Chinese_bottom
# Load the Experiment 2 decoding results (no normalization / z-scoring).
df_decode_E2 <- read_csv(file.path("data", "faceword_E2_Decode_noz.csv"))
head(df_decode_E2)
# Derive hemisphere and subject, relabel top/bottom pairs as partA/partB, and
# keep only labels above the size threshold.
df_clean_decode_E2 <- df_decode_E2 %>%
select(Label, SessCode, ClassifyPair, ACC) %>%
mutate(Hemisphere = if_else(grepl("lh", Label), "left",
if_else(grepl("rh", Label), "right", "NA")),
Subject = str_remove(SessCode, "\\_.*"),
ClassifyPair = fct_recode(ClassifyPair,
`Chinese_partA-Chinese_partB` = "Chinese_top-Chinese_bottom",
`English_partA-English_partB` = "English_top-English_bottom"),
ClassifyPair = factor(ClassifyPair, levels = pair_order_E2)) %>%
left_join(df_label_E2, by = c("Label", "Subject")) %>%
filter(Size > nVtx_size_min)
# Mean decoding accuracy per session, label, and classification pair.
df_decode_acc_E2 <- df_clean_decode_E2 %>%
group_by(Hemisphere, Label, SessCode, ClassifyPair) %>% # divide the data into groups by these columns
summarize(Accuracy = mean(ACC), Count = n()) %>%
ungroup()
df_decode_acc_E2
# Ordered levels for the partA/partB weighting combinations.
simi_order_E2 <- c("English_partA0.25-English_partB0.75",
"English_partA0.50-English_partB0.50",
"English_partA0.75-English_partB0.25",
"Chinese_partA0.25-Chinese_partB0.75",
"Chinese_partA0.50-Chinese_partB0.50",
"Chinese_partA0.75-Chinese_partB0.25")
# Load the Experiment 2 similarity (classifier-probability) results.
df_simi_E2 <- read_csv(file.path("data", "faceword_E2_Similarity_noz.csv"))
head(df_simi_E2)
# Add binary/probability "classified as exchange" measures and clean labels.
df_clean_simi_E2 <- df_simi_E2 %>%
mutate(asExchange = if_else(grepl("exchange", PredictCond), 1, 0), # binary prediction
pExchange = Probability_2, # probability prediction
Subject = str_remove(SessCode, "\\_.*"),
Combination = gsub("top", "partA", Combination),
Combination = gsub("bottom", "partB", Combination),
Combination = factor(Combination, levels = simi_order_E2)) %>%
left_join(df_label_E2, by = c("Label", "Subject")) %>%
filter(Size > nVtx_size_min)
# Average the predictions per session, label, class pair, and combination.
df_rate_simi_E2 <- df_clean_simi_E2 %>%
group_by(SessCode, Label, ClassPair_1, Combination) %>%
summarize(binaryAsExchange = mean(asExchange),
pAsExchange = mean(pExchange),
RateAsExchange = pAsExchange) %>% # use the probability instead of the categorical prediction
ungroup() %>%
mutate(Hemisphere = if_else(grepl("lh", Label), 'left', if_else(grepl("rh", Label), "right", "NA")))
head(df_rate_simi_E2)
# only keep data for these two labels
df_uni_E2_FFA1 <- filter(df_clean_uni_E2, Label %in% label_FFA1)
df_decode_E2_FFA1 <- filter(df_decode_acc_E2, Label %in% label_FFA1)
df_simi_E2_FFA1 <- filter(df_rate_simi_E2, Label %in% label_FFA1)
# Count the distinct sessions (participants) per hemisphere and label.
df_uni_E2_FFA1 %>%
select(Hemisphere, Label, SessCode) %>%
distinct() %>%
group_by(Hemisphere, Label) %>%
summarize(Count = n())
# 2 (FaceWord) x 4 (Layout) ANOVA for the left-hemisphere FFA1 label (E2).
anova_E2_lFFA1 <- aov_4(Response ~ FaceWord * Layout + (FaceWord * Layout | SubjCode),
data = filter(df_uni_E2_FFA1, Label == label_FFA1[[1]]))
anova_E2_lFFA1
## Anova Table (Type 3 tests)
##
## Response: Response
## Effect df MSE F ges p.value
## 1 FaceWord 1, 11 0.22 12.53 ** .10 .005
## 2 Layout 1.73, 19.00 0.03 3.34 + .007 .06
## 3 FaceWord:Layout 2.25, 24.78 0.04 4.10 * .01 .03
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '+' 0.1 ' ' 1
##
## Sphericity correction method: GG
# Estimated marginal means for every FaceWord x Layout cell in left FFA1 (E2).
emm_aov_E2_lFFA1 <- emmeans(anova_E2_lFFA1, ~ FaceWord * Layout)
emm_aov_E2_lFFA1 %>%
as.data.frame() %>%
arrange(FaceWord)
Posthoc analysis for the main effects:
# Pairwise contrast for the FaceWord main effect (averaged over Layout).
contrast(emmeans(emm_aov_E2_lFFA1, ~ FaceWord), "pairwise")
## contrast estimate SE df t.ratio p.value
## English - Chinese 0.338 0.0954 11 3.539 0.0046
##
## Results are averaged over the levels of: Layout
# Tukey-adjusted pairwise contrasts for the Layout main effect.
contrast(emmeans(emm_aov_E2_lFFA1, ~ Layout), "pairwise") # , adjust = "none"
## contrast estimate SE df t.ratio p.value
## intact - exchange -0.10656 0.0394 33 -2.706 0.0499
## intact - partA 0.00125 0.0394 33 0.032 1.0000
## intact - partB -0.04698 0.0394 33 -1.193 0.6354
## exchange - partA 0.10781 0.0394 33 2.738 0.0464
## exchange - partB 0.05958 0.0394 33 1.513 0.4414
## partA - partB -0.04823 0.0394 33 -1.225 0.6159
##
## Results are averaged over the levels of: FaceWord
## P value adjustment: tukey method for comparing a family of 4 estimates
Results of simple effect analysis (uncorrected):
# Uncorrected simple effects for left FFA1 (E2).
contr_aov_E2_lFFA1 <- contrast(emm_aov_E2_lFFA1, "pairwise", simple = "each", combine = TRUE, adjust = "none")
# contrast(emm_uni_anova_E2, interaction = "pairwise") # , adjust = "none"
contr_aov_E2_lFFA1
## Layout FaceWord contrast estimate SE df t.ratio p.value
## intact . English - Chinese 0.34336 0.1134 20.8 3.027 0.0065
## exchange . English - Chinese 0.44662 0.1134 20.8 3.937 0.0008
## partA . English - Chinese 0.13346 0.1134 20.8 1.176 0.2527
## partB . English - Chinese 0.42759 0.1134 20.8 3.769 0.0011
## . English intact - exchange -0.15819 0.0637 62.5 -2.483 0.0157
## . English intact - partA 0.10620 0.0637 62.5 1.667 0.1005
## . English intact - partB -0.08909 0.0637 62.5 -1.398 0.1669
## . English exchange - partA 0.26440 0.0637 62.5 4.150 0.0001
## . English exchange - partB 0.06910 0.0637 62.5 1.085 0.2823
## . English partA - partB -0.19530 0.0637 62.5 -3.065 0.0032
## . Chinese intact - exchange -0.05493 0.0637 62.5 -0.862 0.3919
## . Chinese intact - partA -0.10370 0.0637 62.5 -1.628 0.1086
## . Chinese intact - partB -0.00486 0.0637 62.5 -0.076 0.9394
## . Chinese exchange - partA -0.04877 0.0637 62.5 -0.765 0.4469
## . Chinese exchange - partB 0.05007 0.0637 62.5 0.786 0.4349
## . Chinese partA - partB 0.09884 0.0637 62.5 1.551 0.1259
2(face vs. word) \(\times\) 2(intact vs. exchange) ANOVA
# 2 (FaceWord) x 2 (intact vs. exchange) ANOVA for left FFA1 (E2); "pes" = partial eta squared.
anova_E2_lFFA1_ie <- aov_4(Response ~ FaceWord * Layout + (FaceWord * Layout | SubjCode),
data = filter(df_uni_E2_FFA1,
Label == label_FFA1[[1]],
Layout %in% c("intact", "exchange")))
anova(anova_E2_lFFA1_ie, "pes")
emm_E2_lFFA1_ie <- emmeans(anova_E2_lFFA1_ie, ~ FaceWord + Layout)
# Uncorrected simple effects within each Layout and each FaceWord level.
(simple_E2_lFFA1_ie <- pairs(emm_E2_lFFA1_ie, simple = "each", combine = TRUE, adjust = "none"))
## Layout FaceWord contrast estimate SE df t.ratio p.value
## intact . English - Chinese 0.3434 0.1178 16.8 2.915 0.0098
## exchange . English - Chinese 0.4466 0.1178 16.8 3.791 0.0015
## . English intact - exchange -0.1582 0.0741 21.7 -2.134 0.0444
## . Chinese intact - exchange -0.0549 0.0741 21.7 -0.741 0.4666
# Interaction contrast (difference of differences).
contrast(emm_E2_lFFA1_ie, interaction = "pairwise")
## FaceWord_pairwise Layout_pairwise estimate SE df t.ratio p.value
## English - Chinese intact - exchange -0.103 0.111 11 -0.933 0.3707
2(face vs. word) \(\times\) 2(top vs. bottom) ANOVA
# 2 (FaceWord) x 2 (partA vs. partB) ANOVA for left FFA1 (E2); the Layout
# filter uses the recoded partA/partB levels. "pes" = partial eta squared.
anova_E2_lFFA1_tb <- aov_4(Response ~ FaceWord * Layout + (FaceWord * Layout | SubjCode),
data = filter(df_uni_E2_FFA1,
Label == label_FFA1[[1]],
Layout %in% c("partA", "partB")))
anova(anova_E2_lFFA1_tb, "pes")
emm_E2_lFFA1_tb <- emmeans(anova_E2_lFFA1_tb, ~ FaceWord + Layout)
# Uncorrected simple effects within each Layout and each FaceWord level.
(simple_E2_lFFA1_tb <- pairs(emm_E2_lFFA1_tb, simple = "each", combine = TRUE, adjust = "none"))
## Layout FaceWord contrast estimate SE df t.ratio p.value
## partA . English - Chinese 0.1335 0.1089 18.6 1.225 0.2358
## partB . English - Chinese 0.4276 0.1089 18.6 3.926 0.0009
## . English partA - partB -0.1953 0.0647 16.0 -3.019 0.0082
## . Chinese partA - partB 0.0988 0.0647 16.0 1.528 0.1461
# Interaction contrast (difference of differences).
contrast(emm_E2_lFFA1_tb, interaction = "pairwise")
## FaceWord_pairwise Layout_pairwise estimate SE df t.ratio p.value
## English - Chinese partA - partB -0.294 0.116 11 -2.531 0.0279
# Omnibus 2 (FaceWord) x 4 (Layout) repeated-measures ANOVA for the right FFA1
# label (label_FFA1[[2]])
anova_E2_rFFA1 <- aov_4(Response ~ FaceWord * Layout + (FaceWord * Layout | SubjCode),
data = filter(df_uni_E2_FFA1, Label == label_FFA1[[2]]))
anova_E2_rFFA1
## Anova Table (Type 3 tests)
##
## Response: Response
## Effect df MSE F ges p.value
## 1 FaceWord 1, 14 0.13 0.65 .008 .43
## 2 Layout 2.74, 38.34 0.03 2.08 .02 .12
## 3 FaceWord:Layout 2.18, 30.55 0.03 1.39 .009 .26
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '+' 0.1 ' ' 1
##
## Sphericity correction method: GG
# cell means for all FaceWord x Layout combinations
emm_aov_E2_rFFA1 <- emmeans(anova_E2_rFFA1, ~ FaceWord * Layout)
emm_aov_E2_rFFA1 %>%
as.data.frame() %>%
arrange(FaceWord)
Posthoc analysis for the main effects:
# main effect of FaceWord (averaged over Layout)
contrast(emmeans(emm_aov_E2_rFFA1, ~ FaceWord), "pairwise")
## contrast estimate SE df t.ratio p.value
## English - Chinese -0.0522 0.0648 14 -0.805 0.4341
##
## Results are averaged over the levels of: Layout
# main effect of Layout (Tukey-adjusted pairwise comparisons)
contrast(emmeans(emm_aov_E2_rFFA1, ~ Layout), "pairwise") # , adjust = "none"
## contrast estimate SE df t.ratio p.value
## intact - exchange 0.0605 0.0406 42 1.490 0.4522
## intact - partA -0.0299 0.0406 42 -0.738 0.8814
## intact - partB 0.0445 0.0406 42 1.098 0.6929
## exchange - partA -0.0904 0.0406 42 -2.228 0.1323
## exchange - partB -0.0159 0.0406 42 -0.393 0.9792
## partA - partB 0.0745 0.0406 42 1.835 0.2716
##
## Results are averaged over the levels of: FaceWord
## P value adjustment: tukey method for comparing a family of 4 estimates
Results of simple effect analysis (uncorrected):
# simple effects within each level of each factor, no p adjustment
contr_aov_E2_rFFA1 <- contrast(emm_aov_E2_rFFA1, "pairwise", simple = "each", combine = TRUE, adjust = "none")
# contrast(emm_uni_anova_E2, interaction = "pairwise") # , adjust = "none"
contr_aov_E2_rFFA1
## Layout FaceWord contrast estimate SE df t.ratio p.value
## intact . English - Chinese 0.00632 0.0794 29.1 0.080 0.9371
## exchange . English - Chinese -0.13621 0.0794 29.1 -1.716 0.0969
## partA . English - Chinese -0.06054 0.0794 29.1 -0.763 0.4519
## partB . English - Chinese -0.01847 0.0794 29.1 -0.233 0.8177
## . English intact - exchange 0.13175 0.0552 83.5 2.387 0.0192
## . English intact - partA 0.00350 0.0552 83.5 0.063 0.9496
## . English intact - partB 0.05694 0.0552 83.5 1.032 0.3052
## . English exchange - partA -0.12825 0.0552 83.5 -2.324 0.0226
## . English exchange - partB -0.07481 0.0552 83.5 -1.355 0.1789
## . English partA - partB 0.05344 0.0552 83.5 0.968 0.3357
## . Chinese intact - exchange -0.01078 0.0552 83.5 -0.195 0.8456
## . Chinese intact - partA -0.06337 0.0552 83.5 -1.148 0.2542
## . Chinese intact - partB 0.03215 0.0552 83.5 0.582 0.5618
## . Chinese exchange - partA -0.05258 0.0552 83.5 -0.953 0.3434
## . Chinese exchange - partB 0.04293 0.0552 83.5 0.778 0.4389
## . Chinese partA - partB 0.09551 0.0552 83.5 1.731 0.0872
# 2 (FaceWord) x 2 (Layout: intact vs. exchange) ANOVA for the right FFA1 label
anova_E2_rFFA1_ie <- aov_4(Response ~ FaceWord * Layout + (FaceWord * Layout | SubjCode),
data = filter(df_uni_E2_FFA1,
Label == label_FFA1[[2]],
Layout %in% c("intact", "exchange")))
# ANOVA table with partial eta squared effect sizes
anova(anova_E2_rFFA1_ie, "pes")
emm_E2_rFFA1_ie <- emmeans(anova_E2_rFFA1_ie, ~ FaceWord + Layout)
# uncorrected simple effects
(simple_E2_rFFA1_ie <- pairs(emm_E2_rFFA1_ie, simple = "each", combine = TRUE, adjust = "none"))
## Layout FaceWord contrast estimate SE df t.ratio p.value
## intact . English - Chinese 0.00632 0.0835 17.6 0.076 0.9405
## exchange . English - Chinese -0.13621 0.0835 17.6 -1.631 0.1206
## . English intact - exchange 0.13175 0.0535 23.4 2.463 0.0215
## . Chinese intact - exchange -0.01078 0.0535 23.4 -0.202 0.8420
# interaction contrast
contrast(emm_E2_rFFA1_ie, interaction = "pairwise")
## FaceWord_pairwise Layout_pairwise estimate SE df t.ratio p.value
## English - Chinese intact - exchange 0.143 0.0565 14 2.523 0.0244
2(face vs. word) \(\times\) 2(top vs. bottom) ANOVA
# 2 (FaceWord) x 2 (Layout: partA vs. partB) ANOVA for the right FFA1 label
anova_E2_rFFA1_tb <- aov_4(Response ~ FaceWord * Layout + (FaceWord * Layout | SubjCode),
data = filter(df_uni_E2_FFA1,
Label == label_FFA1[[2]],
Layout %in% c("partA", "partB")))
# ANOVA table with partial eta squared effect sizes
anova(anova_E2_rFFA1_tb, "pes")
emm_E2_rFFA1_tb <- emmeans(anova_E2_rFFA1_tb, ~ FaceWord + Layout)
# uncorrected simple effects
(simple_E2_rFFA1_tb <- pairs(emm_E2_rFFA1_tb, simple = "each", combine = TRUE, adjust = "none"))
## Layout FaceWord contrast estimate SE df t.ratio p.value
## partA . English - Chinese -0.0605 0.0751 23.5 -0.806 0.4280
## partB . English - Chinese -0.0185 0.0751 23.5 -0.246 0.8078
## . English partA - partB 0.0534 0.0589 27.8 0.907 0.3723
## . Chinese partA - partB 0.0955 0.0589 27.8 1.621 0.1164
# interaction contrast
contrast(emm_E2_rFFA1_tb, interaction = "pairwise")
## FaceWord_pairwise Layout_pairwise estimate SE df t.ratio p.value
## English - Chinese partA - partB -0.0421 0.0798 14 -0.527 0.6063
# add the column of Hemisphere
# stack left- and right-hemisphere cell means into one data frame for plotting
nRow_E2 <-nrow(as.data.frame(emm_aov_E2_lFFA1))
Hemisphere <- c(rep("left", nRow_E2), rep("right", nRow_E2))
desp_uni_E2_FFA1 <- cbind(Hemisphere, rbind(as.data.frame(emm_aov_E2_lFFA1), as.data.frame(emm_aov_E2_rFFA1)))
# plot omnibus cell means annotated with the uncorrected simple-effect p-values
plot_uni_E2_FFA1 <- plot_uni(desp_uni_E2_FFA1, contr_aov_E2_lFFA1, contr_aov_E2_rFFA1, "FFA1")
# ggsave('plot_uni_E2_FFA1.png', plot_uni_E2_FFA1, width = 10, height = 10)
plot_uni_E2_FFA1
The above figure shows the neural responses (beta values) in FFA1 for each condition. The numbers are the p-values for the tests of differences between intact vs. exchange in that condition. Error bars represent 95% confidence intervals. Note: “*p<0.1;**p<0.05;***p<0.01”
# add the column of Hemisphere
# combine hemispheres for the intact-vs-exchange (2x2) plot
nRow_E2 <-nrow(as.data.frame(emm_E2_lFFA1_ie))
Hemisphere <- c(rep("left", nRow_E2), rep("right", nRow_E2))
desp_uni_E2_FFA1_ie <- cbind(Hemisphere, rbind(as.data.frame(emm_E2_lFFA1_ie), as.data.frame(emm_E2_rFFA1_ie)))
plot_uni_E2_FFA1_ie <- plot_uni(desp_uni_E2_FFA1_ie, simple_E2_lFFA1_ie, simple_E2_rFFA1_ie, "FFA1", F)
# ggsave('plot_uni_E2_FFA1_ie.png', plot_uni_E2_FFA1_ie, width = 10, height = 5)
plot_uni_E2_FFA1_ie
# add the column of Hemisphere
# combine hemispheres for the top-vs-bottom (partA/partB) plot
nRow_E2 <-nrow(as.data.frame(emm_E2_lFFA1_tb))
Hemisphere <- c(rep("left", nRow_E2), rep("right", nRow_E2))
desp_uni_E2_FFA1_tb <- cbind(Hemisphere, rbind(as.data.frame(emm_E2_lFFA1_tb), as.data.frame(emm_E2_rFFA1_tb)))
plot_uni_E2_FFA1_tb <- plot_uni(desp_uni_E2_FFA1_tb, simple_E2_lFFA1_tb, simple_E2_rFFA1_tb, "FFA1", F, T)
# ggsave('plot_uni_E2_FFA1_tb.png', plot_uni_E2_FFA1_tb, width = 10, height = 5)
plot_uni_E2_FFA1_tb
# One-tailed one-sample t-tests of E2 FFA1 decoding accuracy against chance
# (0.5), per hemisphere and classification pair. The t-test is run once per
# group and its named components are extracted — clearer and safer than the
# previous positional indexing ([[1]]..[[8]]) into the htest object.
one_decode_agg_E2_FFA1 <- {
  df_decode_E2_FFA1 %>%
    mutate(ClassifyPair = fct_relevel(ClassifyPair, pair_order_E2)) %>%
    group_by(Hemisphere, ClassifyPair) %>%
    summarize(
      tt = list(t.test(Accuracy, mu = 0.5, alternative = "greater")),
      mean = tt[[1]]$estimate,
      SD = tt[[1]]$stderr, # NOTE(review): this is the standard error, not SD
      t = tt[[1]]$statistic,
      df = tt[[1]]$parameter,
      p = round(tt[[1]]$p.value, 5),
      lower.CL = tt[[1]]$conf.int[1],
      # the one-tailed CI has an infinite upper bound, so mirror the lower
      # bound around the mean to get a symmetric upper limit for plotting
      upper.CL = mean * 2 - lower.CL,
      nullValue = tt[[1]]$null.value,
      alternative = tt[[1]]$alternative
    ) %>%
    select(-tt)
}
one_decode_agg_E2_FFA1
# decoding-accuracy plot annotated with the one-sample p-values
plot_decode_E2_FFA1 <- plot_decode(one_decode_agg_E2_FFA1, "FFA1")
# ggsave('plot_decode_E2_FFA1.png', plot_decode_E2_FFA1, width = 6.5, height = 16)
plot_decode_E2_FFA1
The above figure shows the decoding accuracy in FFA1 for each pair. The numbers are the p-values for the one-tail one-sample t-tests against the chance level (0.5) in that condition. Error bars represent 95% confidence intervals. Note: “*p<0.1;**p<0.05;***p<0.01”
# Two-tailed one-sample t-tests of the probability that top+bottom patterns
# are classified as "exchange" in FFA1, against chance (0.5), per hemisphere
# and weight combination. The test is run once per group and named htest
# components are used instead of fragile positional indexing.
one_simi_E2_FFA1 <- {
  df_simi_E2_FFA1 %>%
    group_by(Hemisphere, Combination) %>%
    summarize(
      tt = list(t.test(RateAsExchange, mu = 0.5)),
      mean = tt[[1]]$estimate,
      SD = tt[[1]]$stderr, # NOTE(review): this is the standard error, not SD
      t = tt[[1]]$statistic,
      df = tt[[1]]$parameter,
      p = round(tt[[1]]$p.value, 5),
      # two-sided test: both CI bounds come straight from the htest object
      lower.CL = tt[[1]]$conf.int[1],
      upper.CL = tt[[1]]$conf.int[2],
      nullValue = tt[[1]]$null.value,
      alternative = tt[[1]]$alternative
    ) %>%
    select(-tt)
}
one_simi_E2_FFA1
# probability-of-exchange plot annotated with the two-tailed p-values
plot_simi_E2_FFA1 <- plot_simi(one_simi_E2_FFA1, "FFA1")
# ggsave('plot_simi_E2_FFA1.png', plot_simi_E2_FFA1, width = 8, height = 10)
plot_simi_E2_FFA1
The above figure shows the probability of top+bottom being decoded as exchange conditions in FFA1. Patterns of top and bottom were combined with different weights, e.g., “face_top0.25-face_bottom0.75” denotes the linear combination of face_top and face_bottom with the weights of 0.25/0.75. The numbers are the p-values for the two-tail one-sample t-tests against the chance level (0.5) in that condition. Error bars represent 95% confidence intervals.
# only keep data for these two labels
# subset the E2 univariate, decoding, and similarity data to the FFA2 labels
df_uni_E2_FFA2 <- filter(df_clean_uni_E2, Label %in% label_FFA2)
df_decode_E2_FFA2 <- filter(df_decode_acc_E2, Label %in% label_FFA2)
df_simi_E2_FFA2 <- filter(df_rate_simi_E2, Label %in% label_FFA2)
# count sessions available for each hemisphere/label
df_uni_E2_FFA2 %>%
select(Hemisphere, Label, SessCode) %>%
distinct() %>%
group_by(Hemisphere, Label) %>%
summarize(Count = n())
# Omnibus 2 (FaceWord) x 4 (Layout) repeated-measures ANOVA for the left FFA2
# label (label_FFA2[[1]])
anova_E2_lFFA2 <- aov_4(Response ~ FaceWord * Layout + (FaceWord * Layout | SubjCode),
data = filter(df_uni_E2_FFA2, Label == label_FFA2[[1]]))
anova_E2_lFFA2
## Anova Table (Type 3 tests)
##
## Response: Response
## Effect df MSE F ges p.value
## 1 FaceWord 1, 12 0.18 8.65 * .08 .01
## 2 Layout 2.52, 30.24 0.02 0.84 .002 .47
## 3 FaceWord:Layout 2.56, 30.70 0.03 2.83 + .01 .06
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '+' 0.1 ' ' 1
##
## Sphericity correction method: GG
# cell means for all FaceWord x Layout combinations
emm_aov_E2_lFFA2 <- emmeans(anova_E2_lFFA2, ~ FaceWord * Layout)
emm_aov_E2_lFFA2 %>%
as.data.frame() %>%
arrange(FaceWord)
Posthoc analysis for the main effects:
# main effect of FaceWord (averaged over Layout)
contrast(emmeans(emm_aov_E2_lFFA2, ~ FaceWord), "pairwise")
## contrast estimate SE df t.ratio p.value
## English - Chinese 0.247 0.084 12 2.940 0.0124
##
## Results are averaged over the levels of: Layout
# main effect of Layout (Tukey-adjusted pairwise comparisons)
contrast(emmeans(emm_aov_E2_lFFA2, ~ Layout), "pairwise") # , adjust = "none"
## contrast estimate SE df t.ratio p.value
## intact - exchange 0.01909 0.0343 36 0.556 0.9442
## intact - partA 0.02635 0.0343 36 0.768 0.8683
## intact - partB -0.02327 0.0343 36 -0.678 0.9047
## exchange - partA 0.00726 0.0343 36 0.212 0.9966
## exchange - partB -0.04236 0.0343 36 -1.234 0.6095
## partA - partB -0.04962 0.0343 36 -1.446 0.4799
##
## Results are averaged over the levels of: FaceWord
## P value adjustment: tukey method for comparing a family of 4 estimates
Results of simple effect analysis (uncorrected):
# simple effects within each level of each factor, no p adjustment
contr_aov_E2_lFFA2 <- contrast(emm_aov_E2_lFFA2, "pairwise", simple = "each", combine = TRUE, adjust = "none")
# contrast(emm_uni_anova_E2, interaction = "pairwise") # , adjust = "none"
contr_aov_E2_lFFA2
## Layout FaceWord contrast estimate SE df t.ratio p.value
## intact . English - Chinese 0.3426 0.1002 23.0 3.418 0.0024
## exchange . English - Chinese 0.2466 0.1002 23.0 2.460 0.0218
## partA . English - Chinese 0.0986 0.1002 23.0 0.984 0.3355
## partB . English - Chinese 0.2999 0.1002 23.0 2.992 0.0065
## . English intact - exchange 0.0671 0.0564 67.5 1.191 0.2379
## . English intact - partA 0.1484 0.0564 67.5 2.633 0.0105
## . English intact - partB -0.0019 0.0564 67.5 -0.034 0.9732
## . English exchange - partA 0.0813 0.0564 67.5 1.442 0.1539
## . English exchange - partB -0.0690 0.0564 67.5 -1.224 0.2250
## . English partA - partB -0.1503 0.0564 67.5 -2.666 0.0096
## . Chinese intact - exchange -0.0289 0.0564 67.5 -0.513 0.6096
## . Chinese intact - partA -0.0957 0.0564 67.5 -1.697 0.0942
## . Chinese intact - partB -0.0446 0.0564 67.5 -0.792 0.4311
## . Chinese exchange - partA -0.0667 0.0564 67.5 -1.184 0.2404
## . Chinese exchange - partB -0.0157 0.0564 67.5 -0.279 0.7811
## . Chinese partA - partB 0.0510 0.0564 67.5 0.905 0.3685
2(face vs. word) \(\times\) 2(intact vs. exchange) ANOVA
# 2 (FaceWord) x 2 (Layout: intact vs. exchange) ANOVA for the left FFA2 label
anova_E2_lFFA2_ie <- aov_4(Response ~ FaceWord * Layout + (FaceWord * Layout | SubjCode),
data = filter(df_uni_E2_FFA2,
Label == label_FFA2[[1]],
Layout %in% c("intact", "exchange")))
# ANOVA table with partial eta squared effect sizes
anova(anova_E2_lFFA2_ie, "pes")
emm_E2_lFFA2_ie <- emmeans(anova_E2_lFFA2_ie, ~ FaceWord + Layout)
# uncorrected simple effects
(simple_E2_lFFA2_ie <- pairs(emm_E2_lFFA2_ie, simple = "each", combine = TRUE, adjust = "none"))
## Layout FaceWord contrast estimate SE df t.ratio p.value
## intact . English - Chinese 0.3426 0.1014 19.6 3.379 0.0030
## exchange . English - Chinese 0.2466 0.1014 19.6 2.432 0.0247
## . English intact - exchange 0.0671 0.0607 19.7 1.105 0.2825
## . Chinese intact - exchange -0.0289 0.0607 19.7 -0.476 0.6392
# interaction contrast
contrast(emm_E2_lFFA2_ie, interaction = "pairwise")
## FaceWord_pairwise Layout_pairwise estimate SE df t.ratio p.value
## English - Chinese intact - exchange 0.096 0.104 12 0.924 0.3738
2(face vs. word) \(\times\) 2(top vs. bottom) ANOVA
# 2 (FaceWord) x 2 (Layout: partA vs. partB) ANOVA for the left FFA2 label
anova_E2_lFFA2_tb <- aov_4(Response ~ FaceWord * Layout + (FaceWord * Layout | SubjCode),
data = filter(df_uni_E2_FFA2,
Label == label_FFA2[[1]],
Layout %in% c("partA", "partB")))
# ANOVA table with partial eta squared effect sizes
anova(anova_E2_lFFA2_tb, "pes")
emm_E2_lFFA2_tb <- emmeans(anova_E2_lFFA2_tb, ~ FaceWord + Layout)
# uncorrected simple effects
(simple_E2_lFFA2_tb <- pairs(emm_E2_lFFA2_tb, simple = "each", combine = TRUE, adjust = "none"))
## Layout FaceWord contrast estimate SE df t.ratio p.value
## partA . English - Chinese 0.0986 0.0991 19.0 0.995 0.3321
## partB . English - Chinese 0.2999 0.0991 19.0 3.027 0.0069
## . English partA - partB -0.1503 0.0643 23.4 -2.336 0.0284
## . Chinese partA - partB 0.0510 0.0643 23.4 0.793 0.4357
# interaction contrast
contrast(emm_E2_lFFA2_tb, interaction = "pairwise")
## FaceWord_pairwise Layout_pairwise estimate SE df t.ratio p.value
## English - Chinese partA - partB -0.201 0.098 12 -2.053 0.0625
# Omnibus 2 (FaceWord) x 4 (Layout) repeated-measures ANOVA for the right FFA2
# label (label_FFA2[[2]])
anova_E2_rFFA2 <- aov_4(Response ~ FaceWord * Layout + (FaceWord * Layout | SubjCode),
data = filter(df_uni_E2_FFA2, Label == label_FFA2[[2]]))
anova_E2_rFFA2
## Anova Table (Type 3 tests)
##
## Response: Response
## Effect df MSE F ges p.value
## 1 FaceWord 1, 17 0.05 0.00 <.0001 .99
## 2 Layout 2.55, 43.43 0.01 0.23 .0009 .85
## 3 FaceWord:Layout 2.28, 38.70 0.02 0.72 .005 .51
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '+' 0.1 ' ' 1
##
## Sphericity correction method: GG
# cell means for all FaceWord x Layout combinations
emm_aov_E2_rFFA2 <- emmeans(anova_E2_rFFA2, ~ FaceWord * Layout)
emm_aov_E2_rFFA2 %>%
as.data.frame() %>%
arrange(FaceWord)
Posthoc analysis for the main effects:
# main effect of FaceWord (averaged over Layout)
contrast(emmeans(emm_aov_E2_rFFA2, ~ FaceWord), "pairwise")
## contrast estimate SE df t.ratio p.value
## English - Chinese 0.000317 0.0355 17 0.009 0.9930
##
## Results are averaged over the levels of: Layout
# main effect of Layout (Tukey-adjusted pairwise comparisons)
contrast(emmeans(emm_aov_E2_rFFA2, ~ Layout), "pairwise") # , adjust = "none"
## contrast estimate SE df t.ratio p.value
## intact - exchange 0.01582 0.0232 51 0.682 0.9034
## intact - partA -0.00143 0.0232 51 -0.062 0.9999
## intact - partB 0.00305 0.0232 51 0.131 0.9992
## exchange - partA -0.01725 0.0232 51 -0.744 0.8789
## exchange - partB -0.01277 0.0232 51 -0.551 0.9459
## partA - partB 0.00447 0.0232 51 0.193 0.9974
##
## Results are averaged over the levels of: FaceWord
## P value adjustment: tukey method for comparing a family of 4 estimates
Results of simple effect analysis (uncorrected):
# simple effects within each level of each factor, no p adjustment
contr_aov_E2_rFFA2 <- contrast(emm_aov_E2_rFFA2, "pairwise", simple = "each", combine = TRUE, adjust = "none")
# contrast(emm_uni_anova_E2, interaction = "pairwise") # , adjust = "none"
contr_aov_E2_rFFA2
## Layout FaceWord contrast estimate SE df t.ratio p.value
## intact . English - Chinese 0.04175 0.0529 56.1 0.789 0.4335
## exchange . English - Chinese -0.00244 0.0529 56.1 -0.046 0.9634
## partA . English - Chinese -0.05021 0.0529 56.1 -0.949 0.3468
## partB . English - Chinese 0.01216 0.0529 56.1 0.230 0.8190
## . English intact - exchange 0.03791 0.0396 92.9 0.958 0.3403
## . English intact - partA 0.04455 0.0396 92.9 1.126 0.2630
## . English intact - partB 0.01784 0.0396 92.9 0.451 0.6531
## . English exchange - partA 0.00664 0.0396 92.9 0.168 0.8671
## . English exchange - partB -0.02007 0.0396 92.9 -0.507 0.6130
## . English partA - partB -0.02671 0.0396 92.9 -0.675 0.5012
## . Chinese intact - exchange -0.00628 0.0396 92.9 -0.159 0.8743
## . Chinese intact - partA -0.04741 0.0396 92.9 -1.198 0.2338
## . Chinese intact - partB -0.01175 0.0396 92.9 -0.297 0.7672
## . Chinese exchange - partA -0.04113 0.0396 92.9 -1.040 0.3011
## . Chinese exchange - partB -0.00547 0.0396 92.9 -0.138 0.8903
## . Chinese partA - partB 0.03566 0.0396 92.9 0.901 0.3697
# 2 (FaceWord) x 2 (Layout: intact vs. exchange) ANOVA for the right FFA2 label
anova_E2_rFFA2_ie <- aov_4(Response ~ FaceWord * Layout + (FaceWord * Layout | SubjCode),
data = filter(df_uni_E2_FFA2,
Label == label_FFA2[[2]],
Layout %in% c("intact", "exchange")))
# ANOVA table with partial eta squared effect sizes
anova(anova_E2_rFFA2_ie, "pes")
emm_E2_rFFA2_ie <- emmeans(anova_E2_rFFA2_ie, ~ FaceWord + Layout)
# uncorrected simple effects
(simple_E2_rFFA2_ie <- pairs(emm_E2_rFFA2_ie, simple = "each", combine = TRUE, adjust = "none"))
## Layout FaceWord contrast estimate SE df t.ratio p.value
## intact . English - Chinese 0.04175 0.0560 28.9 0.746 0.4619
## exchange . English - Chinese -0.00244 0.0560 28.9 -0.044 0.9656
## . English intact - exchange 0.03791 0.0409 33.8 0.927 0.3607
## . Chinese intact - exchange -0.00628 0.0409 33.8 -0.153 0.8790
# interaction contrast
contrast(emm_E2_rFFA2_ie, interaction = "pairwise")
## FaceWord_pairwise Layout_pairwise estimate SE df t.ratio p.value
## English - Chinese intact - exchange 0.0442 0.0602 17 0.735 0.4726
2(face vs. word) \(\times\) 2(top vs. bottom) ANOVA
# 2 (FaceWord) x 2 (Layout: partA vs. partB) ANOVA for the right FFA2 label
anova_E2_rFFA2_tb <- aov_4(Response ~ FaceWord * Layout + (FaceWord * Layout | SubjCode),
data = filter(df_uni_E2_FFA2,
Label == label_FFA2[[2]],
Layout %in% c("partA", "partB")))
# ANOVA table with partial eta squared effect sizes
anova(anova_E2_rFFA2_tb, "pes")
emm_E2_rFFA2_tb <- emmeans(anova_E2_rFFA2_tb, ~ FaceWord + Layout)
# uncorrected simple effects
(simple_E2_rFFA2_tb <- pairs(emm_E2_rFFA2_tb, simple = "each", combine = TRUE, adjust = "none"))
## Layout FaceWord contrast estimate SE df t.ratio p.value
## partA . English - Chinese -0.0502 0.0497 33.5 -1.011 0.3192
## partB . English - Chinese 0.0122 0.0497 33.5 0.245 0.8080
## . English partA - partB -0.0267 0.0420 25.9 -0.635 0.5308
## . Chinese partA - partB 0.0357 0.0420 25.9 0.848 0.4042
# interaction contrast
contrast(emm_E2_rFFA2_tb, interaction = "pairwise")
## FaceWord_pairwise Layout_pairwise estimate SE df t.ratio p.value
## English - Chinese partA - partB -0.0624 0.0742 17 -0.840 0.4125
# add the column of Hemisphere
# stack left- and right-hemisphere cell means for plotting
nRow_E2 <-nrow(as.data.frame(emm_aov_E2_lFFA2))
Hemisphere <- c(rep("left", nRow_E2), rep("right", nRow_E2))
desp_uni_E2_FFA2 <- cbind(Hemisphere, rbind(as.data.frame(emm_aov_E2_lFFA2), as.data.frame(emm_aov_E2_rFFA2)))
# plot omnibus cell means annotated with the uncorrected simple-effect p-values
plot_uni_E2_FFA2 <- plot_uni(desp_uni_E2_FFA2, contr_aov_E2_lFFA2, contr_aov_E2_rFFA2, "FFA2")
# ggsave('plot_uni_E2_FFA2.png', plot_uni_E2_FFA2, width = 10, height = 10)
plot_uni_E2_FFA2
The above figure shows the neural responses (beta values) in FFA2 for each condition. The numbers are the p-values for the tests of differences between intact vs. exchange in that condition. Error bars represent 95% confidence intervals. Note: “*p<0.1;**p<0.05;***p<0.01”
# add the column of Hemisphere
# combine hemispheres for the intact-vs-exchange (2x2) plot
nRow_E2 <-nrow(as.data.frame(emm_E2_lFFA2_ie))
Hemisphere <- c(rep("left", nRow_E2), rep("right", nRow_E2))
desp_uni_E2_FFA2_ie <- cbind(Hemisphere, rbind(as.data.frame(emm_E2_lFFA2_ie), as.data.frame(emm_E2_rFFA2_ie)))
plot_uni_E2_FFA2_ie <- plot_uni(desp_uni_E2_FFA2_ie, simple_E2_lFFA2_ie, simple_E2_rFFA2_ie, "FFA2", F)
# ggsave('plot_uni_E2_FFA2_ie.png', plot_uni_E2_FFA2_ie, width = 10, height = 5)
plot_uni_E2_FFA2_ie
# add the column of Hemisphere
# combine hemispheres for the top-vs-bottom (partA/partB) plot
nRow_E2 <-nrow(as.data.frame(emm_E2_lFFA2_tb))
Hemisphere <- c(rep("left", nRow_E2), rep("right", nRow_E2))
desp_uni_E2_FFA2_tb <- cbind(Hemisphere, rbind(as.data.frame(emm_E2_lFFA2_tb), as.data.frame(emm_E2_rFFA2_tb)))
plot_uni_E2_FFA2_tb <- plot_uni(desp_uni_E2_FFA2_tb, simple_E2_lFFA2_tb, simple_E2_rFFA2_tb, "FFA2", F, T)
# ggsave('plot_uni_E2_FFA2_tb.png', plot_uni_E2_FFA2_tb, width = 10, height = 5)
plot_uni_E2_FFA2_tb
# One-tailed one-sample t-tests of E2 FFA2 decoding accuracy against chance
# (0.5), per hemisphere and classification pair. The t-test is run once per
# group and its named components are extracted — clearer and safer than the
# previous positional indexing ([[1]]..[[8]]) into the htest object.
one_decode_agg_E2_FFA2 <- {
  df_decode_E2_FFA2 %>%
    mutate(ClassifyPair = fct_relevel(ClassifyPair, pair_order_E2)) %>%
    group_by(Hemisphere, ClassifyPair) %>%
    summarize(
      tt = list(t.test(Accuracy, mu = 0.5, alternative = "greater")),
      mean = tt[[1]]$estimate,
      SD = tt[[1]]$stderr, # NOTE(review): this is the standard error, not SD
      t = tt[[1]]$statistic,
      df = tt[[1]]$parameter,
      p = round(tt[[1]]$p.value, 5),
      lower.CL = tt[[1]]$conf.int[1],
      # the one-tailed CI has an infinite upper bound, so mirror the lower
      # bound around the mean to get a symmetric upper limit for plotting
      upper.CL = mean * 2 - lower.CL,
      nullValue = tt[[1]]$null.value,
      alternative = tt[[1]]$alternative
    ) %>%
    select(-tt)
}
one_decode_agg_E2_FFA2
# decoding-accuracy plot annotated with the one-sample p-values
plot_decode_E2_FFA2 <- plot_decode(one_decode_agg_E2_FFA2, "FFA2")
# ggsave('plot_decode_E2_FFA2.png', plot_decode_E2_FFA2, width = 6.5, height = 16)
plot_decode_E2_FFA2
The above figure shows the decoding accuracy in FFA2 for each pair. The numbers are the p-values for the one-tail one-sample t-tests against the chance level (0.5) in that condition. Error bars represent 95% confidence intervals. Note: “*p<0.1;**p<0.05;***p<0.01”
# Similarity of top + bottom to intact vs. exchange in FFA2
# Two-tailed one-sample t-tests of the probability that top+bottom patterns
# are classified as "exchange", against chance (0.5), per hemisphere and
# weight combination. The t-test is run once per group and named htest
# components are used instead of fragile positional indexing.
one_simi_E2_FFA2 <- {
  df_simi_E2_FFA2 %>%
    group_by(Hemisphere, Combination) %>%
    summarize(
      tt = list(t.test(RateAsExchange, mu = 0.5)),
      mean = tt[[1]]$estimate,
      SD = tt[[1]]$stderr, # NOTE(review): this is the standard error, not SD
      t = tt[[1]]$statistic,
      df = tt[[1]]$parameter,
      p = round(tt[[1]]$p.value, 5),
      # two-sided test: both CI bounds come straight from the htest object
      lower.CL = tt[[1]]$conf.int[1],
      upper.CL = tt[[1]]$conf.int[2],
      nullValue = tt[[1]]$null.value,
      alternative = tt[[1]]$alternative
    ) %>%
    select(-tt)
}
one_simi_E2_FFA2
# probability-of-exchange plot annotated with the two-tailed p-values
plot_simi_E2_FFA2 <- plot_simi(one_simi_E2_FFA2, "FFA2")
# ggsave('plot_simi_E2_FFA2.png', plot_simi_E2_FFA2, width = 8, height = 10)
plot_simi_E2_FFA2
The above figure shows the probability of top+bottom being decoded as exchange conditions in FFA2. Patterns of top and bottom were combined with different weights, e.g., “face_top0.25-face_bottom0.75” denotes the linear combination of face_top and face_bottom with the weights of 0.25/0.75. The numbers are the p-values for the two-tail one-sample t-tests against the chance level (0.5) in that condition. Error bars represent 95% confidence intervals.
# only keep data for these two labels
# subset the E2 univariate, decoding, and similarity data to the VWFA label(s)
df_uni_E2_VWFA <- filter(df_clean_uni_E2, Label %in% label_VWFA)
df_decode_E2_VWFA <- filter(df_decode_acc_E2, Label %in% label_VWFA)
df_simi_E2_VWFA <- filter(df_rate_simi_E2, Label %in% label_VWFA)
# subjects used for each hemisphere
# unique(as.character((df_univar_agg_E2_VWFA %>% filter(Label == label_VWFA_E2))$SubjCode))
# count sessions available for each hemisphere/label
df_uni_E2_VWFA %>%
select(Hemisphere, Label, SessCode) %>%
distinct() %>%
group_by(Hemisphere, Label) %>%
summarize(Count = n())
# Omnibus 2 (FaceWord) x 4 (Layout) repeated-measures ANOVA for the VWFA
# NOTE(review): filters with `Label == label_VWFA` while the 2x2 ANOVAs below
# use `label_VWFA[[1]]` — equivalent only if label_VWFA has length 1; confirm
anova_E2_VWFA <- aov_4(Response ~ FaceWord * Layout + (FaceWord * Layout | SubjCode),
data = filter(df_uni_E2_VWFA, Label == label_VWFA))
anova_E2_VWFA
## Anova Table (Type 3 tests)
##
## Response: Response
## Effect df MSE F ges p.value
## 1 FaceWord 1, 13 0.26 66.19 *** .35 <.0001
## 2 Layout 2.25, 29.19 0.03 10.51 *** .02 .0002
## 3 FaceWord:Layout 1.62, 21.06 0.06 9.23 ** .03 .002
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '+' 0.1 ' ' 1
##
## Sphericity correction method: GG
# cell means for all FaceWord x Layout combinations
emm_aov_E2_VWFA <- emmeans(anova_E2_VWFA, ~ FaceWord * Layout)
emm_aov_E2_VWFA %>%
as.data.frame() %>%
arrange(FaceWord)
Posthoc analysis for the main effects:
# main effect of FaceWord (averaged over Layout)
contrast(emmeans(emm_aov_E2_VWFA, ~ FaceWord), "pairwise")
## contrast estimate SE df t.ratio p.value
## English - Chinese 0.786 0.0966 13 8.135 <.0001
##
## Results are averaged over the levels of: Layout
# main effect of Layout (Tukey-adjusted pairwise comparisons)
contrast(emmeans(emm_aov_E2_VWFA, ~ Layout), "pairwise") # , adjust = "none"
## contrast estimate SE df t.ratio p.value
## intact - exchange -0.1739 0.0366 39 -4.748 0.0002
## intact - partA -0.0150 0.0366 39 -0.411 0.9763
## intact - partB -0.1217 0.0366 39 -3.323 0.0101
## exchange - partA 0.1588 0.0366 39 4.337 0.0006
## exchange - partB 0.0522 0.0366 39 1.425 0.4917
## partA - partB -0.1066 0.0366 39 -2.912 0.0289
##
## Results are averaged over the levels of: FaceWord
## P value adjustment: tukey method for comparing a family of 4 estimates
Results of simple effect analysis (uncorrected):
# simple effects within each level of each factor, no p adjustment
contr_aov_E2_VWFA <- contrast(emm_aov_E2_VWFA, "pairwise", simple = "each", combine = TRUE, adjust = "none")
# contrast(emm_uni_aov_E2, interaction = "pairwise") # , adjust = "none"
contr_aov_E2_VWFA
## Layout FaceWord contrast estimate SE df t.ratio p.value
## intact . English - Chinese 0.7486 0.1124 22.9 6.662 <.0001
## exchange . English - Chinese 0.9659 0.1124 22.9 8.596 <.0001
## partA . English - Chinese 0.5173 0.1124 22.9 4.604 0.0001
## partB . English - Chinese 0.9114 0.1124 22.9 8.111 <.0001
## . English intact - exchange -0.2825 0.0595 73.7 -4.749 <.0001
## . English intact - partA 0.1006 0.0595 73.7 1.692 0.0950
## . English intact - partB -0.2031 0.0595 73.7 -3.414 0.0010
## . English exchange - partA 0.3831 0.0595 73.7 6.441 <.0001
## . English exchange - partB 0.0794 0.0595 73.7 1.335 0.1859
## . English partA - partB -0.3037 0.0595 73.7 -5.106 <.0001
## . Chinese intact - exchange -0.0652 0.0595 73.7 -1.096 0.2766
## . Chinese intact - partA -0.1307 0.0595 73.7 -2.197 0.0312
## . Chinese intact - partB -0.0403 0.0595 73.7 -0.677 0.5006
## . Chinese exchange - partA -0.0655 0.0595 73.7 -1.101 0.2746
## . Chinese exchange - partB 0.0249 0.0595 73.7 0.419 0.6762
## . Chinese partA - partB 0.0904 0.0595 73.7 1.520 0.1328
2(face vs. word) \(\times\) 2(intact vs. exchange) ANOVA
# 2 (FaceWord) x 2 (Layout: intact vs. exchange) ANOVA for the VWFA
anova_E2_VWFA_ie <- aov_4(Response ~ FaceWord * Layout + (FaceWord * Layout | SubjCode),
data = filter(df_uni_E2_VWFA,
Label == label_VWFA[[1]],
Layout %in% c("intact", "exchange")))
# ANOVA table with partial eta squared effect sizes
anova(anova_E2_VWFA_ie, "pes")
emm_E2_VWFA_ie <- emmeans(anova_E2_VWFA_ie, ~ FaceWord + Layout)
# uncorrected simple effects
(simple_E2_VWFA_ie <- pairs(emm_E2_VWFA_ie, simple = "each", combine = TRUE, adjust = "none"))
## Layout FaceWord contrast estimate SE df t.ratio p.value
## intact . English - Chinese 0.7486 0.1161 18.2 6.448 <.0001
## exchange . English - Chinese 0.9659 0.1161 18.2 8.319 <.0001
## . English intact - exchange -0.2825 0.0625 25.1 -4.521 0.0001
## . Chinese intact - exchange -0.0652 0.0625 25.1 -1.043 0.3067
# interaction contrast
contrast(emm_E2_VWFA_ie, interaction = "pairwise")
## FaceWord_pairwise Layout_pairwise estimate SE df t.ratio p.value
## English - Chinese intact - exchange -0.217 0.0964 13 -2.255 0.0420
2(face vs. word) \(\times\) 2(top vs. bottom) ANOVA
# 2 (FaceWord) x 2 (Layout: partA vs. partB) ANOVA for the VWFA
anova_E2_VWFA_tb <- aov_4(Response ~ FaceWord * Layout + (FaceWord * Layout | SubjCode),
data = filter(df_uni_E2_VWFA,
Label == label_VWFA[[1]],
Layout %in% c("partA", "partB")))
# ANOVA table with partial eta squared effect sizes
anova(anova_E2_VWFA_tb, "pes")
emm_E2_VWFA_tb <- emmeans(anova_E2_VWFA_tb, ~ FaceWord + Layout)
# uncorrected simple effects
(simple_E2_VWFA_tb <- pairs(emm_E2_VWFA_tb, simple = "each", combine = TRUE, adjust = "none"))
## Layout FaceWord contrast estimate SE df t.ratio p.value
## partA . English - Chinese 0.5173 0.1085 22.3 4.768 0.0001
## partB . English - Chinese 0.9114 0.1085 22.3 8.401 <.0001
## . English partA - partB -0.3037 0.0693 21.6 -4.383 0.0002
## . Chinese partA - partB 0.0904 0.0693 21.6 1.305 0.2057
# interaction contrast
contrast(emm_E2_VWFA_tb, interaction = "pairwise")
## FaceWord_pairwise Layout_pairwise estimate SE df t.ratio p.value
## English - Chinese partA - partB -0.394 0.118 13 -3.338 0.0053
# VWFA is analyzed in the left hemisphere only, so no rbind across hemispheres
nRow_E2 <-nrow(as.data.frame(emm_aov_E2_VWFA))
Hemisphere <- c(rep("left", nRow_E2))
desp_uni_E2_VWFA <- cbind(Hemisphere, as.data.frame(emm_aov_E2_VWFA))
# plot omnibus cell means annotated with the uncorrected simple-effect p-values
plot_uni_E2_VWFA <- plot_uni_vwfa(desp_uni_E2_VWFA, contr_aov_E2_VWFA, "VWFA")
# ggsave('plot_uni_E2_VWFA.png', plot_uni_E2_VWFA, width = 5.5, height = 10)
plot_uni_E2_VWFA
The above figure shows the neural responses (beta values) in VWFA for each condition. The numbers are the p-values for the tests of differences between intact vs. exchange in that condition. Error bars represent 95% confidence intervals. Note: *, p < .05
# VWFA intact-vs-exchange plot (left hemisphere only)
nRow_E2 <- nrow(as.data.frame(emm_E2_VWFA_ie))
Hemisphere <- c(rep("left", nRow_E2))
desp_uni_E2_VWFA_ie <- cbind(Hemisphere, as.data.frame(emm_E2_VWFA_ie))
# FIX: annotate with the intact-vs-exchange simple effects (simple_E2_VWFA_ie);
# the original passed the top-vs-bottom contrasts (simple_E2_VWFA_tb), a
# copy-paste slip — cf. the parallel FFA1/FFA2 "_ie" plot blocks
plot_uni_E2_VWFA_ie <- plot_uni_vwfa(desp_uni_E2_VWFA_ie, simple_E2_VWFA_ie, "VWFA", FALSE)
# ggsave('plot_uni_E2_VWFA_ie.png', plot_uni_E2_VWFA_ie, width = 5.5, height = 10)
plot_uni_E2_VWFA_ie
# VWFA top-vs-bottom (partA/partB) plot, left hemisphere only
nRow_E2 <-nrow(as.data.frame(emm_E2_VWFA_tb))
Hemisphere <- c(rep("left", nRow_E2))
desp_uni_E2_VWFA_tb <- cbind(Hemisphere, as.data.frame(emm_E2_VWFA_tb))
plot_uni_E2_VWFA_tb <- plot_uni_vwfa(desp_uni_E2_VWFA_tb, simple_E2_VWFA_tb, "VWFA", FALSE, T)
# ggsave('plot_uni_E2_VWFA_tb.png', plot_uni_E2_VWFA_tb, width = 5.5, height = 10)
plot_uni_E2_VWFA_tb
# One-tailed one-sample t-tests of E2 VWFA decoding accuracy against chance
# (0.5), per hemisphere and classification pair. The t-test is run once per
# group and its named components are extracted — clearer and safer than the
# previous positional indexing ([[1]]..[[8]]) into the htest object.
one_decode_agg_E2_VWFA <- {
  df_decode_E2_VWFA %>%
    mutate(ClassifyPair = fct_relevel(ClassifyPair, pair_order_E2)) %>%
    group_by(Hemisphere, ClassifyPair) %>%
    summarize(
      tt = list(t.test(Accuracy, mu = 0.5, alternative = "greater")),
      mean = tt[[1]]$estimate,
      SD = tt[[1]]$stderr, # NOTE(review): this is the standard error, not SD
      t = tt[[1]]$statistic,
      df = tt[[1]]$parameter,
      p = round(tt[[1]]$p.value, 5),
      lower.CL = tt[[1]]$conf.int[1],
      # the one-tailed CI has an infinite upper bound, so mirror the lower
      # bound around the mean to get a symmetric upper limit for plotting
      upper.CL = mean * 2 - lower.CL,
      nullValue = tt[[1]]$null.value,
      alternative = tt[[1]]$alternative
    ) %>%
    select(-tt)
}
one_decode_agg_E2_VWFA
# decoding-accuracy plot annotated with the one-sample p-values
plot_decode_E2_VWFA <- plot_decode_vwfa(one_decode_agg_E2_VWFA, "VWFA")
# ggsave('plot_decode_E2_VWFA.png', plot_decode_E2_VWFA, width = 4, height = 16)
plot_decode_E2_VWFA
The above figure shows the decoding accuracy in VWFA for each pair. The numbers are the p-values for the one-tail one-sample t-tests against the chance level (0.5) in that condition. Error bars represent 95% confidence intervals. Note: ***, p <.001
# Similarity of top + bottom to intact vs. exchange in VWFA
# Two-tailed one-sample t-tests of the probability that top+bottom patterns
# are classified as "exchange", against chance (0.5), per hemisphere and
# weight combination. The t-test is run once per group and named htest
# components are used instead of fragile positional indexing.
one_simi_E2_VWFA <- {
  df_simi_E2_VWFA %>%
    group_by(Hemisphere, Combination) %>%
    summarize(
      tt = list(t.test(RateAsExchange, mu = 0.5)),
      mean = tt[[1]]$estimate,
      SD = tt[[1]]$stderr, # NOTE(review): this is the standard error, not SD
      t = tt[[1]]$statistic,
      df = tt[[1]]$parameter,
      p = round(tt[[1]]$p.value, 5),
      # two-sided test: both CI bounds come straight from the htest object
      lower.CL = tt[[1]]$conf.int[1],
      upper.CL = tt[[1]]$conf.int[2],
      nullValue = tt[[1]]$null.value,
      alternative = tt[[1]]$alternative
    ) %>%
    select(-tt)
}
one_simi_E2_VWFA
# probability-of-exchange plot annotated with the two-tailed p-values
plot_simi_E2_VWFA <- plot_simi_vwfa(one_simi_E2_VWFA, "VWFA")
# ggsave('plot_simi_E2_VWFA.png', plot_simi_E2_VWFA, width = 4.25, height = 10)
plot_simi_E2_VWFA
The above figure shows the probability of top+bottom being decoded as exchange conditions in VWFA. Patterns of top and bottom were combined with different weights, e.g., “face_top0.25-face_bottom0.75” denotes the linear combination of face_top and face_bottom with the weights of 0.25/0.75. The numbers are the p-values for the two-tail one-sample t-tests against the chance level (0.5) in that condition. Error bars represent 95% confidence intervals.
# only keep data for these two labels
# subset the E2 univariate, decoding, and similarity data to the LO labels
df_uni_E2_LO <- filter(df_clean_uni_E2, Label %in% label_LO)
df_decode_E2_LO <- filter(df_decode_acc_E2, Label %in% label_LO)
df_simi_E2_LO <- filter(df_rate_simi_E2, Label %in% label_LO)
# count sessions available for each hemisphere/label
df_uni_E2_LO %>%
select(Hemisphere, Label, SessCode) %>%
distinct() %>%
group_by(Hemisphere, Label) %>%
summarize(Count = n())
anova_E2_lLO <- aov_4(Response ~ FaceWord * Layout + (FaceWord * Layout | SubjCode),
data = filter(df_uni_E2_LO, Label == label_LO[[1]]))
anova_E2_lLO
## Anova Table (Type 3 tests)
##
## Response: Response
## Effect df MSE F ges p.value
## 1 FaceWord 1, 15 0.19 4.52 + .01 .05
## 2 Layout 1.75, 26.31 0.05 1.33 .002 .28
## 3 FaceWord:Layout 2.06, 30.90 0.07 1.75 .004 .19
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '+' 0.1 ' ' 1
##
## Sphericity correction method: GG
# estimated marginal means for every FaceWord x Layout cell (left LO)
emm_aov_E2_lLO <- emmeans(anova_E2_lLO, ~ FaceWord * Layout)
# display the cell means sorted by FaceWord
arrange(as.data.frame(emm_aov_E2_lLO), FaceWord)
Posthoc analysis for the main effects:
# Posthoc pairwise contrast for the main effect of FaceWord (averaged over Layout)
contrast(emmeans(emm_aov_E2_lLO, ~ FaceWord), "pairwise")
## contrast estimate SE df t.ratio p.value
## English - Chinese -0.163 0.0768 15 -2.126 0.0506
##
## Results are averaged over the levels of: Layout
# Posthoc pairwise contrasts for the main effect of Layout (Tukey-adjusted)
contrast(emmeans(emm_aov_E2_lLO, ~ Layout), "pairwise") # , adjust = "none"
## contrast estimate SE df t.ratio p.value
## intact - exchange -0.05319 0.0414 45 -1.284 0.5779
## intact - partA -0.08137 0.0414 45 -1.964 0.2170
## intact - partB -0.04894 0.0414 45 -1.181 0.6419
## exchange - partA -0.02818 0.0414 45 -0.680 0.9042
## exchange - partB 0.00426 0.0414 45 0.103 0.9996
## partA - partB 0.03244 0.0414 45 0.783 0.8619
##
## Results are averaged over the levels of: FaceWord
## P value adjustment: tukey method for comparing a family of 4 estimates
Results of simple effect analysis (uncorrected):
# Simple-effect pairwise contrasts within each level of the other factor
# (no multiple-comparison correction)
contr_aov_E2_lLO <- contrast(emm_aov_E2_lLO, "pairwise", simple = "each", combine = TRUE, adjust = "none")
# contrast(emm_uni_aov_E2, interaction = "pairwise") # , adjust = "none"
contr_aov_E2_lLO
## Layout FaceWord contrast estimate SE df t.ratio p.value
## intact . English - Chinese -0.1079 0.1006 37.8 -1.073 0.2903
## exchange . English - Chinese -0.0773 0.1006 37.8 -0.768 0.4470
## partA . English - Chinese -0.3014 0.1006 37.8 -2.996 0.0048
## partB . English - Chinese -0.1661 0.1006 37.8 -1.651 0.1070
## . English intact - exchange -0.0685 0.0673 85.0 -1.017 0.3120
## . English intact - partA 0.0154 0.0673 85.0 0.228 0.8201
## . English intact - partB -0.0198 0.0673 85.0 -0.295 0.7691
## . English exchange - partA 0.0838 0.0673 85.0 1.245 0.2164
## . English exchange - partB 0.0487 0.0673 85.0 0.723 0.4719
## . English partA - partB -0.0352 0.0673 85.0 -0.523 0.6025
## . Chinese intact - exchange -0.0379 0.0673 85.0 -0.563 0.5750
## . Chinese intact - partA -0.1781 0.0673 85.0 -2.645 0.0097
## . Chinese intact - partB -0.0780 0.0673 85.0 -1.159 0.2497
## . Chinese exchange - partA -0.1402 0.0673 85.0 -2.082 0.0403
## . Chinese exchange - partB -0.0401 0.0673 85.0 -0.596 0.5527
## . Chinese partA - partB 0.1001 0.0673 85.0 1.486 0.1409
2(face vs. word) \(\times\) 2(intact vs. exchange) ANOVA
# 2 (FaceWord) x 2 (Layout: intact vs. exchange) ANOVA in left LO
anova_E2_lLO_ie <- aov_4(Response ~ FaceWord * Layout + (FaceWord * Layout | SubjCode),
data = filter(df_uni_E2_LO,
Label == label_LO[[1]],
Layout %in% c("intact", "exchange")))
# "pes" requests partial eta-squared effect sizes
anova(anova_E2_lLO_ie, "pes")
emm_E2_lLO_ie <- emmeans(anova_E2_lLO_ie, ~ FaceWord + Layout)
# simple effects (uncorrected); outer parentheses print the assigned result
(simple_E2_lLO_ie <- pairs(emm_E2_lLO_ie, simple = "each", combine = TRUE, adjust = "none"))
## Layout FaceWord contrast estimate SE df t.ratio p.value
## intact . English - Chinese -0.1079 0.0962 21.3 -1.122 0.2745
## exchange . English - Chinese -0.0773 0.0962 21.3 -0.804 0.4305
## . English intact - exchange -0.0685 0.0727 26.4 -0.942 0.3547
## . Chinese intact - exchange -0.0379 0.0727 26.4 -0.521 0.6065
# interaction contrast (difference of differences)
contrast(emm_E2_lLO_ie, interaction = "pairwise")
## FaceWord_pairwise Layout_pairwise estimate SE df t.ratio p.value
## English - Chinese intact - exchange -0.0306 0.0818 15 -0.374 0.7139
2(face vs. word) \(\times\) 2(top vs. bottom) ANOVA
# 2 (FaceWord) x 2 (Layout: partA vs. partB, i.e., top vs. bottom) ANOVA in left LO
anova_E2_lLO_tb <- aov_4(Response ~ FaceWord * Layout + (FaceWord * Layout | SubjCode),
data = filter(df_uni_E2_LO,
Label == label_LO[[1]],
Layout %in% c("partA", "partB")))
# "pes" requests partial eta-squared effect sizes
anova(anova_E2_lLO_tb, "pes")
emm_E2_lLO_tb <- emmeans(anova_E2_lLO_tb, ~ FaceWord + Layout)
# simple effects (uncorrected); outer parentheses print the assigned result
(simple_E2_lLO_tb <- pairs(emm_E2_lLO_tb, simple = "each", combine = TRUE, adjust = "none"))
## Layout FaceWord contrast estimate SE df t.ratio p.value
## partA . English - Chinese -0.3014 0.105 29.6 -2.875 0.0074
## partB . English - Chinese -0.1661 0.105 29.6 -1.585 0.1236
## . English partA - partB -0.0352 0.075 19.5 -0.470 0.6439
## . Chinese partA - partB 0.1001 0.075 19.5 1.335 0.1973
# interaction contrast (difference of differences)
contrast(emm_E2_lLO_tb, interaction = "pairwise")
## FaceWord_pairwise Layout_pairwise estimate SE df t.ratio p.value
## English - Chinese partA - partB -0.135 0.14 15 -0.968 0.3482
# Full 2 (FaceWord) x 4 (Layout) repeated-measures ANOVA on univariate
# responses in the right LO label (second element of label_LO)
aov_E2_rLO <- aov_4(Response ~ FaceWord * Layout + (FaceWord * Layout | SubjCode),
data = filter(df_uni_E2_LO, Label == label_LO[[2]]))
aov_E2_rLO
## Anova Table (Type 3 tests)
##
## Response: Response
## Effect df MSE F ges p.value
## 1 FaceWord 1, 16 0.15 44.67 *** .03 <.0001
## 2 Layout 2.30, 36.79 0.07 3.03 + .002 .05
## 3 FaceWord:Layout 2.18, 34.93 0.04 1.05 .0005 .36
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '+' 0.1 ' ' 1
##
## Sphericity correction method: GG
# estimated marginal means for every FaceWord x Layout cell (right LO)
emm_aov_E2_rLO <- emmeans(aov_E2_rLO, ~ FaceWord * Layout)
emm_aov_E2_rLO %>%
as.data.frame() %>%
arrange(FaceWord)
Posthoc analysis for the main effects:
# Posthoc pairwise contrast for the main effect of FaceWord (averaged over Layout)
contrast(emmeans(emm_aov_E2_rLO, ~ FaceWord), "pairwise")
## contrast estimate SE df t.ratio p.value
## English - Chinese -0.438 0.0655 16 -6.684 <.0001
##
## Results are averaged over the levels of: Layout
# Posthoc pairwise contrasts for the main effect of Layout (Tukey-adjusted)
contrast(emmeans(emm_aov_E2_rLO, ~ Layout), "pairwise") # , adjust = "none"
## contrast estimate SE df t.ratio p.value
## intact - exchange 0.08614 0.0568 48 1.516 0.4360
## intact - partA -0.08487 0.0568 48 -1.494 0.4491
## intact - partB -0.00697 0.0568 48 -0.123 0.9993
## exchange - partA -0.17101 0.0568 48 -3.010 0.0209
## exchange - partB -0.09311 0.0568 48 -1.639 0.3671
## partA - partB 0.07790 0.0568 48 1.371 0.5233
##
## Results are averaged over the levels of: FaceWord
## P value adjustment: tukey method for comparing a family of 4 estimates
Results of simple effect analysis (uncorrected):
# Simple-effect pairwise contrasts within each level of the other factor
# (no multiple-comparison correction)
contr_aov_E2_rLO <- contrast(emm_aov_E2_rLO, "pairwise", simple = "each", combine = TRUE, adjust = "none")
# contrast(emm_uni_aov_E2, interaction = "pairwise") # , adjust = "none"
contr_aov_E2_rLO
## Layout FaceWord contrast estimate SE df t.ratio p.value
## intact . English - Chinese -0.3835 0.0837 37.6 -4.584 <.0001
## exchange . English - Chinese -0.5192 0.0837 37.6 -6.206 <.0001
## partA . English - Chinese -0.3966 0.0837 37.6 -4.741 <.0001
## partB . English - Chinese -0.4507 0.0837 37.6 -5.388 <.0001
## . English intact - exchange 0.1540 0.0710 88.9 2.170 0.0327
## . English intact - partA -0.0783 0.0710 88.9 -1.103 0.2729
## . English intact - partB 0.0267 0.0710 88.9 0.376 0.7081
## . English exchange - partA -0.2323 0.0710 88.9 -3.273 0.0015
## . English exchange - partB -0.1273 0.0710 88.9 -1.794 0.0762
## . English partA - partB 0.1050 0.0710 88.9 1.479 0.1427
## . Chinese intact - exchange 0.0183 0.0710 88.9 0.257 0.7975
## . Chinese intact - partA -0.0914 0.0710 88.9 -1.288 0.2010
## . Chinese intact - partB -0.0406 0.0710 88.9 -0.572 0.5687
## . Chinese exchange - partA -0.1097 0.0710 88.9 -1.546 0.1257
## . Chinese exchange - partB -0.0589 0.0710 88.9 -0.830 0.4090
## . Chinese partA - partB 0.0508 0.0710 88.9 0.716 0.4757
# 2 (FaceWord) x 2 (Layout: intact vs. exchange) ANOVA in right LO
anova_E2_rLO_ie <- aov_4(Response ~ FaceWord * Layout + (FaceWord * Layout | SubjCode),
data = filter(df_uni_E2_LO,
Label == label_LO[[2]],
Layout %in% c("intact", "exchange")))
# "pes" requests partial eta-squared effect sizes
anova(anova_E2_rLO_ie, "pes")
emm_E2_rLO_ie <- emmeans(anova_E2_rLO_ie, ~ FaceWord + Layout)
# simple effects (uncorrected); outer parentheses print the assigned result
(simple_E2_rLO_ie <- pairs(emm_E2_rLO_ie, simple = "each", combine = TRUE, adjust = "none"))
## Layout FaceWord contrast estimate SE df t.ratio p.value
## intact . English - Chinese -0.3835 0.0832 25.8 -4.607 0.0001
## exchange . English - Chinese -0.5192 0.0832 25.8 -6.238 <.0001
## . English intact - exchange 0.1540 0.0712 29.3 2.164 0.0388
## . Chinese intact - exchange 0.0183 0.0712 29.3 0.257 0.7993
# interaction contrast (difference of differences)
contrast(emm_E2_rLO_ie, interaction = "pairwise")
## FaceWord_pairwise Layout_pairwise estimate SE df t.ratio p.value
## English - Chinese intact - exchange 0.136 0.0839 16 1.617 0.1254
2(face vs. word) \(\times\) 2(top vs. bottom) ANOVA
# 2 (FaceWord) x 2 (Layout: partA vs. partB, i.e., top vs. bottom) ANOVA in right LO
anova_E2_rLO_tb <- aov_4(Response ~ FaceWord * Layout + (FaceWord * Layout | SubjCode),
data = filter(df_uni_E2_LO,
Label == label_LO[[2]],
Layout %in% c("partA", "partB")))
# "pes" requests partial eta-squared effect sizes
anova(anova_E2_rLO_tb, "pes")
emm_E2_rLO_tb <- emmeans(anova_E2_rLO_tb, ~ FaceWord + Layout)
# simple effects (uncorrected); outer parentheses print the assigned result
(simple_E2_rLO_tb <- pairs(emm_E2_rLO_tb, simple = "each", combine = TRUE, adjust = "none"))
## Layout FaceWord contrast estimate SE df t.ratio p.value
## partA . English - Chinese -0.3966 0.0841 29.6 -4.717 0.0001
## partB . English - Chinese -0.4507 0.0841 29.6 -5.361 <.0001
## . English partA - partB 0.1050 0.0629 29.6 1.669 0.1056
## . Chinese partA - partB 0.0508 0.0629 29.6 0.808 0.4254
# interaction contrast (difference of differences)
contrast(emm_E2_rLO_tb, interaction = "pairwise")
## FaceWord_pairwise Layout_pairwise estimate SE df t.ratio p.value
## English - Chinese partA - partB 0.0541 0.101 16 0.537 0.5984
# add the column of Hemisphere
# Bind the left/right emmeans grids into one data frame for plotting.
# Each side now uses its OWN row count (the original reused the left
# hemisphere's nrow for both sides, which would silently misalign the
# Hemisphere labels if the two grids ever differed in size).
desp_E2_lLO <- as.data.frame(emm_aov_E2_lLO)
desp_E2_rLO <- as.data.frame(emm_aov_E2_rLO)
Hemisphere <- c(rep("left", nrow(desp_E2_lLO)), rep("right", nrow(desp_E2_rLO)))
desp_uni_E2_LO <- cbind(Hemisphere, rbind(desp_E2_lLO, desp_E2_rLO))
plot_uni_E2_LO <- plot_uni(desp_uni_E2_LO, contr_aov_E2_lLO, contr_aov_E2_rLO, "LO")
# ggsave('plot_uni_E2_LO.png', plot_uni_E2_LO, width = 10, height = 10)
plot_uni_E2_LO
The above figure shows the neural responses (beta values) in LO for each condition. The numbers are the p-values for the tests of differences between intact vs. exchange in that condition. Error bars represent 95% confidence intervals. Note: *, p < .05
# add the column of Hemisphere
# Intact-vs-exchange cell means: bind left/right grids, each side using its
# own row count so the Hemisphere labels cannot be misaligned.
desp_E2_lLO_ie <- as.data.frame(emm_E2_lLO_ie)
desp_E2_rLO_ie <- as.data.frame(emm_E2_rLO_ie)
Hemisphere <- c(rep("left", nrow(desp_E2_lLO_ie)), rep("right", nrow(desp_E2_rLO_ie)))
desp_uni_E2_LO_ie <- cbind(Hemisphere, rbind(desp_E2_lLO_ie, desp_E2_rLO_ie))
# FALSE (was `F`) spelled out per R best practice; same argument value
plot_uni_E2_LO_ie <- plot_uni(desp_uni_E2_LO_ie, simple_E2_lLO_ie, simple_E2_rLO_ie, "LO", FALSE)
# ggsave('plot_uni_E2_LO_ie.png', plot_uni_E2_LO_ie, width = 10, height = 5)
plot_uni_E2_LO_ie
# add the column of Hemisphere
# Top-vs-bottom (partA/partB) cell means: bind left/right grids, each side
# using its own row count so the Hemisphere labels cannot be misaligned.
desp_E2_lLO_tb <- as.data.frame(emm_E2_lLO_tb)
desp_E2_rLO_tb <- as.data.frame(emm_E2_rLO_tb)
Hemisphere <- c(rep("left", nrow(desp_E2_lLO_tb)), rep("right", nrow(desp_E2_rLO_tb)))
desp_uni_E2_LO_tb <- cbind(Hemisphere, rbind(desp_E2_lLO_tb, desp_E2_rLO_tb))
# FALSE/TRUE (were `F`/`T`) spelled out per R best practice; same values
plot_uni_E2_LO_tb <- plot_uni(desp_uni_E2_LO_tb, simple_E2_lLO_tb, simple_E2_rLO_tb, "LO", FALSE, TRUE)
# ggsave('plot_uni_E2_LO_tb.png', plot_uni_E2_LO_tb, width = 10, height = 5)
plot_uni_E2_LO_tb
# one-sample for results of decode E2 LO
# One-tailed (greater than chance, mu = 0.5) one-sample t-tests on decoding
# accuracy for each Hemisphere x ClassifyPair cell. The t-test is computed
# once per cell and its components are extracted by name rather than
# fragile positional index.
one_decode_agg_E2_LO <- df_decode_E2_LO %>%
  mutate(ClassifyPair = fct_relevel(ClassifyPair, pair_order_E2)) %>%
  group_by(Hemisphere, ClassifyPair) %>%
  summarize({
    tt <- t.test(Accuracy, mu = 0.5, alternative = "greater")
    tibble(
      mean = tt$estimate,
      # NOTE(review): standard error of the mean, not the SD; column name
      # kept for downstream compatibility
      SD = tt$stderr,
      t = tt$statistic,
      df = tt$parameter,
      p = round(tt$p.value, 5),
      lower.CL = tt$conf.int[1],
      # the one-tailed CI's upper bound is Inf; report the symmetric bound
      # mean + (mean - lower) instead, as in the original code
      upper.CL = 2 * tt$estimate - tt$conf.int[1],
      nullValue = tt$null.value,
      alternative = tt$alternative
    )
  })
one_decode_agg_E2_LO
plot_decode_E2_LO <- plot_decode(one_decode_agg_E2_LO, "LO")
# ggsave('plot_decode_E2_LO.png', plot_decode_E2_LO, width = 6.5, height = 16)
plot_decode_E2_LO
The above figure shows the decoding accuracy in LO for each pair. The numbers are the p-values for the one-tail one-sample t-tests against the chance level (0.5) in that condition. Error bars represent 95% confidence intervals. Note: **, p < .01; ***, p < .001
# Similarity of top + bottom to intact vs. exchange in LO
# Two-tailed one-sample t-tests of RateAsExchange against chance (0.5) for
# each Hemisphere x Combination cell; the test is run once per cell and its
# components are extracted by name.
one_simi_E2_LO <- df_simi_E2_LO %>%
  group_by(Hemisphere, Combination) %>%
  summarize({
    tt <- t.test(RateAsExchange, mu = 0.5)
    tibble(
      mean = tt$estimate,
      # NOTE(review): standard error of the mean, not the SD; column name
      # kept for downstream compatibility
      SD = tt$stderr,
      t = tt$statistic,
      df = tt$parameter,
      p = round(tt$p.value, 5),
      lower.CL = tt$conf.int[1],
      upper.CL = tt$conf.int[2],
      nullValue = tt$null.value,
      alternative = tt$alternative
    )
  })
one_simi_E2_LO
plot_simi_E2_LO <- plot_simi(one_simi_E2_LO, "LO")
# ggsave('plot_simi_E2_LO.png', plot_simi_E2_LO, width = 8, height = 10)
plot_simi_E2_LO
The above figure shows the probability of top+bottom being decoded as exchange conditions in LO. Patterns of top and bottom were combined with different weights, e.g., “face_top0.25-face_bottom0.75” denotes the linear combination of face_top and face_bottom with the weights of 0.25/0.75. The numbers are the p-values for the two-tail one-sample t-tests against the chance level (0.5) in that condition. Error bars represent 95% confidence intervals.
Labels for LO were defined with the maximum area of 100, 150, 200 and 300 mm^2, respectively.
# Read the E1 LO label table; strip the "roi." prefix and ".label" suffix
# to get the bare ROI name, and drop the session suffix to get the subject.
df_label_LO_E1 <- read_csv(file.path("data", "faceword_E1_Label_LO_HJ.csv")) %>%
  mutate(roi = str_remove(Label, "roi."),
         roi = str_remove(roi, ".label"),
         Subject = str_remove(SubjCode, "_.*"))
# df_label %>% head()
# one row per participant, one column per ROI label (cell = label Size)
df_label_LO_E1 %>%
  select(SubjCode, roi, Size) %>%
  pivot_wider(names_from = roi, values_from = Size) %>%
  arrange(SubjCode)
The above table displays the size (in mm2) of each label for each participant. (NA denotes that this label is not available for that participant.)
# number of vertices (NVtxs) per label, one row per participant
df_label_LO_E1 %>%
select(SubjCode, roi, NVtxs) %>%
pivot_wider(names_from = roi, values_from = NVtxs) %>%
arrange(SubjCode)
The above table displays the number of vertices for each label and each participant. (NA denotes that this label is not available for that participant.)
# label counts and mean sizes BEFORE applying the size threshold
df_label_LO_E1 %>%
group_by(Label, roi) %>%
summarize(Count = n(),
meanSize = mean(Size),
meanNVtx = mean(NVtxs))
# same summary AFTER excluding labels with Size <= nVtx_size_min
df_nlabel_LO_E1 <- df_label_LO_E1 %>%
filter(Size > nVtx_size_min) %>%
group_by(Label, roi) %>%
summarize(Count = n(),
meanSize = mean(Size),
meanNVtx = mean(NVtxs))
df_nlabel_LO_E1
The above table displays the number of participants included in the following analyses for each ROI. (VWFA is only found on the left hemisphere.)
# load decoding results in LO
# Derive the hemisphere from the label name, join the label sizes, and
# drop labels below the minimum size threshold.
df_LO_area_E1 <- read_csv(file.path("data", "faceword_E1_Decode_LO_noz.csv")) %>%
  select(Label, SessCode, ClassifyPair, ACC) %>%
  mutate(Hemisphere = case_when(grepl("lh", Label) ~ "left",
                                grepl("rh", Label) ~ "right",
                                TRUE ~ "NA"),
         Subject = str_remove(SessCode, "\\_.*")) %>%
  left_join(df_label_LO_E1, by = c("Label", "Subject")) %>%
  filter(Size > nVtx_size_min)
# average accuracy (and fold count) per hemisphere, label, session and pair
df_decode_LO_acc_E1 <- df_LO_area_E1 %>%
  group_by(Hemisphere, Label, SessCode, ClassifyPair) %>%
  summarize(Accuracy = mean(ACC), Count = n()) %>%
  ungroup()
df_decode_LO_acc_E1
# one-sample for results of decode E1 LO
# One-tailed (greater than chance, mu = 0.5) one-sample t-tests on decoding
# accuracy for each Hemisphere x ClassifyPair x Label cell; the t-test is
# run once per cell and its components are extracted by name.
one_decode_agg_E1_LO_area <- df_decode_LO_acc_E1 %>%
  mutate(ClassifyPair = fct_relevel(ClassifyPair, pair_order_E1)) %>%
  group_by(Hemisphere, ClassifyPair, Label) %>%
  summarize({
    tt <- t.test(Accuracy, mu = 0.5, alternative = "greater")
    tibble(
      mean = tt$estimate,
      # NOTE(review): standard error of the mean, not the SD; column name
      # kept for downstream compatibility
      SD = tt$stderr,
      t = tt$statistic,
      df = tt$parameter,
      p = round(tt$p.value, 5),
      lower.CL = tt$conf.int[1],
      # one-tailed CI's upper bound is Inf; report the symmetric bound
      # mean + (mean - lower) instead, as in the original code
      upper.CL = 2 * tt$estimate - tt$conf.int[1],
      nullValue = tt$null.value,
      alternative = tt$alternative
    )
  })
one_decode_agg_E1_LO_area
# Read the E2 LO label table; strip the "roi." prefix and ".label" suffix
# to get the bare ROI name, and drop the session suffix to get the subject.
df_label_LO_E2 <- read_csv(file.path("data", "faceword_E2_Label_LO_HJ.csv")) %>%
  mutate(roi = str_remove(Label, "roi."),
         roi = str_remove(roi, ".label"),
         Subject = str_remove(SubjCode, "_.*"))
# df_label %>% head()
# one row per participant, one column per ROI label (cell = label Size)
df_label_LO_E2 %>%
  select(SubjCode, roi, Size) %>%
  pivot_wider(names_from = roi, values_from = Size) %>%
  arrange(SubjCode)
The above table displays the size (in mm2) of each label for each participant. (NA denotes that this label is not available for that participant.)
# number of vertices (NVtxs) per label, one row per participant
df_label_LO_E2 %>%
select(SubjCode, roi, NVtxs) %>%
pivot_wider(names_from = roi, values_from = NVtxs) %>%
arrange(SubjCode)
The above table displays the number of vertices for each label and each participant. (NA denotes that this label is not available for that participant.)
# label counts and mean sizes BEFORE applying the size threshold
df_label_LO_E2 %>%
group_by(Label, roi) %>%
summarize(Count = n(),
meanSize = mean(Size),
meanNVtx = mean(NVtxs))
# same summary AFTER excluding labels with Size <= nVtx_size_min
df_nlabel_LO_E2 <- df_label_LO_E2 %>%
filter(Size > nVtx_size_min) %>%
group_by(Label, roi) %>%
summarize(Count = n(),
meanSize = mean(Size),
meanNVtx = mean(NVtxs))
df_nlabel_LO_E2
The above table displays the number of participants included in the following analyses for each ROI. (VWFA is only found on the left hemisphere.)
# load decoding results in LO
# E2's "top"/"bottom" pair names are recoded to partA/partB so both
# experiments share the same pair labels; labels below the size threshold
# are dropped after joining the label-size table.
df_LO_area_E2 <- read_csv(file.path("data", "faceword_E2_Decode_LO_noz.csv")) %>%
select(Label, SessCode, ClassifyPair, ACC) %>%
mutate(Hemisphere = if_else(grepl("lh", Label), "left",
if_else(grepl("rh", Label), "right", "NA")),
Subject = str_remove(SessCode, "\\_.*"),
ClassifyPair = fct_recode(ClassifyPair,
`Chinese_partA-Chinese_partB` = "Chinese_top-Chinese_bottom",
`English_partA-English_partB` = "English_top-English_bottom"),
ClassifyPair = factor(ClassifyPair, levels = pair_order_E2)) %>%
left_join(df_label_LO_E2, by = c("Label", "Subject")) %>%
filter(Size > nVtx_size_min)
# average accuracy (and fold count) per hemisphere, label, session and pair
df_decode_LO_acc_E2 <- df_LO_area_E2 %>%
group_by(Hemisphere, Label, SessCode, ClassifyPair) %>% # divide the data into groups by these columns
summarize(Accuracy = mean(ACC), Count = n()) %>%
ungroup()
df_decode_LO_acc_E2
# one-sample for results of decode E2 LO
# One-tailed (greater than chance, mu = 0.5) one-sample t-tests on decoding
# accuracy for each Hemisphere x ClassifyPair x Label cell; the t-test is
# run once per cell and its components are extracted by name.
one_decode_agg_E2_LO_area <- df_decode_LO_acc_E2 %>%
  mutate(ClassifyPair = fct_relevel(ClassifyPair, pair_order_E2)) %>%
  group_by(Hemisphere, ClassifyPair, Label) %>%
  summarize({
    tt <- t.test(Accuracy, mu = 0.5, alternative = "greater")
    tibble(
      mean = tt$estimate,
      # NOTE(review): standard error of the mean, not the SD; column name
      # kept for downstream compatibility
      SD = tt$stderr,
      t = tt$statistic,
      df = tt$parameter,
      p = round(tt$p.value, 5),
      lower.CL = tt$conf.int[1],
      # one-tailed CI's upper bound is Inf; report the symmetric bound
      # mean + (mean - lower) instead, as in the original code
      upper.CL = 2 * tt$estimate - tt$conf.int[1],
      nullValue = tt$null.value,
      alternative = tt$alternative
    )
  })
one_decode_agg_E2_LO_area
# Combine the E1 and E2 area-wise decoding summaries into one data frame,
# split each "stim1_layout1-stim2_layout2" pair name into its four parts,
# and build display labels for Stimuli and Layout.
df_decode_LO_area <- rbind(mutate(one_decode_agg_E1_LO_area, Exp = "E1"),
mutate(one_decode_agg_E2_LO_area, Exp = "E2")) %>%
separate(ClassifyPair, c("Stimuli1", "Layout1", "Stimuli2", "Layout2")) %>%
filter(!(Label %in% c("roi.lh.o-vs-scr.label", "roi.rh.o-vs-scr.label"))) %>%
mutate(Stimuli1 = if_else(Stimuli1 %in% c("face", "word"), paste0(Stimuli1, "s"), Stimuli1),
Stimuli2 = if_else(Stimuli2 %in% c("face", "word"), paste0(Stimuli2, "s"), Stimuli2),
Stimuli = ifelse(Stimuli1 == Stimuli2, Stimuli1,
paste(Stimuli1, Stimuli2, sep = "\nvs.\n")),
Layout = ifelse(Layout1 == Layout2, Layout1,
paste(toTitleCase(Layout1), toTitleCase(Layout2), sep = "\nvs.\n")),
# assumes the area string (e.g. "100") sits at characters 18-20 of the
# label name -- NOTE(review): verify against the actual label format
Area = substr(Label, 18, 20)) %>%
select(-c(Stimuli1, Stimuli2, Layout1, Layout2))
# save the df for intact English vs. intact Chinese
df_intact_LO_area <- filter(df_decode_LO_area, Layout == "intact") %>%
mutate(Stimuli = fct_rev(Stimuli))
# facet annotation text, drawn at (x = .5, y = 1.05) in the left-hemisphere facets
dat_text_intact_LO_area <- data.frame(
Stimuli = c("faces\nvs.\nwords", "English\nvs.\nChinese"),
Hemisphere = c("left"),
label = c("Chinese speakers: \nfaces vs. Chinese characters",
"English speakers: \nEnglish words vs. Chinese characters"),
x = .5, # c(1.35, 1.6),
y = 1.05
)
# intact
# Bar plot of decoding accuracy for the intact conditions across LO label
# areas, faceted by stimulus pair (rows) and hemisphere (columns), with
# 95% CI error bars, a chance line at 0.5, and significance asterisks.
plot_intact_LO_area <- ggplot(df_intact_LO_area, aes(y = mean, x = Area)) +
geom_col(position = "dodge", width = .5, fill = "#CDCDC8") +
facet_grid(Stimuli ~ Hemisphere, scales = "free_x", space = "free_x",
switch = "x",
labeller = labeller(Hemisphere = c(left = "left hemisphere", right = "right hemisphere"))) +
geom_errorbar(mapping = aes(ymin = lower.CL, ymax = upper.CL), linetype = 1, # set the error bar
show.legend = FALSE, width = 0.25, alpha = .5,
position = position_dodge(width=0.9)) +
geom_hline(yintercept = 0.5, linetype = 5, alpha = 0.5) + # add the line for 0.5 and 1 (y)
scale_y_continuous(expand= c(0, 0), breaks = seq(0, 1, .25)) + # remove the space between columns and x axis
coord_cartesian(ylim = c(0.4, 1.1)) +
labs(x = expression(paste("Lateral Occipital Label Area (", mm^2, ")")), y = "Accuracy") + # set the names for main, x and y axises
geom_text(aes(label = sig_ast(p)), size = 7, nudge_y = 0.15) + # add starts to the significant columns
# geom_text(aes(label = round(mean, 2)), size = 4, nudge_y = 0.2) +
geom_text(data = dat_text_intact_LO_area, aes(x = x, y = y, label = label), size = 4, fontface = "bold", hjust=0) + #
theme_bw() +
theme(
plot.title = element_text(lineheight=.8, face="bold", size = 24, hjust = 0.5, vjust = -1),
# plot.margin = margin(5, 170, 60, 40, unit = "pt"),
text = element_text(colour="black"),
axis.text = element_text(colour="black"),
axis.text.x = element_text(face = "bold", size = 16),
axis.text.y = element_text(size = 13),
axis.title.x = element_text(face = "bold", size = 20), # the size of the texts in plot
axis.title.y = element_text(size = 17, vjust = 2.5), # the size of the texts in plot
axis.line.x = element_line(colour = 'black', size=0.5, linetype='solid'),
axis.line.y = element_line(colour = 'black', size=0.5, linetype='solid'), # , arrow = arrow(length = unit(0.3, "cm"))
# axis.text.x = element_text(angle = 45, vjust = 0.5),
panel.border = element_blank(),
panel.grid.minor = element_blank(),
panel.grid.major = element_blank(),
panel.spacing = unit(1.5, "lines"),
# remove the facet background color
strip.text.x = element_text(size = 15),
strip.text.y = element_blank(),
strip.background = element_blank(),
strip.placement = "outside",
) +
NULL
# ggsave('plot_decode_LO_area_intact.pdf', plot_intact_LO_area, width = 8, height = 8)
plot_intact_LO_area
# keep within-stimulus pairs (no "vs." in Stimuli) for intact vs. exchange
df_inex_LO_area <- df_decode_LO_area %>%
  filter(str_detect(Stimuli, "vs.", negate = TRUE),
         Layout == "Intact\nvs.\nExchange")
# move English, then Chinese, to the end of the factor order
df_inex_LO_area$Stimuli <- fct_relevel(df_inex_LO_area$Stimuli,
                                       "English", "Chinese", after = Inf)
# facet annotation text, drawn at (x = .5, y = 1.05) in the left-hemisphere facets
dat_text_inex_LO_area <- data.frame(
  Stimuli = levels(df_inex_LO_area$Stimuli),
  Hemisphere = c("left"),
  label = c("Chinese speakers: \nfaces",
            "Chinese speakers: \nChinese characters",
            "English speakers: \nEnglish words",
            "English speakers: \nChinese characters"),
  x = .5,
  y = 1.05
)
# Bar plot of intact-vs-exchange decoding accuracy across LO label areas,
# faceted by stimulus (rows) and hemisphere (columns), with 95% CI error
# bars, a chance line at 0.5, and significance asterisks.
plot_inex_LO_area <- ggplot(df_inex_LO_area, aes(y = mean, x = Area)) +
geom_col(position = "dodge", width = .5, fill = "#CDCDC8") +
facet_grid(Stimuli ~ Hemisphere, scales = "free_x", space = "free_x",
switch = "x",
labeller = labeller(Hemisphere = c(left = "left hemisphere", right = "right hemisphere"))) +
geom_errorbar(mapping = aes(ymin = lower.CL, ymax = upper.CL), linetype = 1, # set the error bar
show.legend = FALSE, width = 0.25, alpha = .5,
position = position_dodge(width=0.9)) +
geom_hline(yintercept = 0.5, linetype = 5, alpha = 0.5) + # add the line for 0.5 and 1 (y)
# scale_x_discrete(labels = toTitleCase(df_0$Stimuli)) +
scale_y_continuous(expand= c(0, 0), breaks = seq(0, 1, .25)) + # remove the space between columns and x axis
coord_cartesian(ylim = c(0.4, 1.1)) +
labs(title = "Intact vs. Exchange", x = expression(paste("Lateral Occipital Label Area (", mm^2, ")")), y = "Accuracy") + # set the names for main, x and y axises
geom_text(aes(label = sig_ast(p)), size = 7, nudge_y = 0.15) + # add starts to the significant columns
# geom_text(aes(label = round(mean, 2)), size = 4, nudge_y = 0.2) +
geom_text(data = dat_text_inex_LO_area, mapping = aes(x = x, y = y, label = label), size = 6, fontface = "bold", hjust=0) +
theme_bw() +
theme(
plot.title = element_text(lineheight=.8, face="bold", size = 24, hjust = 0.5, vjust = -1),
# plot.margin = margin(5, 170, 60, 40, unit = "pt"),
text = element_text(colour="black"),
axis.text = element_text(colour="black"),
axis.text.x = element_text(face = "bold", size = 16),
axis.text.y = element_text(size = 13),
axis.title.x = element_text(face = "bold", size = 20), # the size of the texts in plot
axis.title.y = element_text(size = 17, vjust = 2.5), # the size of the texts in plot
axis.line.x = element_line(colour = 'black', size=0.5, linetype='solid'),
axis.line.y = element_line(colour = 'black', size=0.5, linetype='solid'), # , arrow = arrow(length = unit(0.3, "cm"))
# axis.text.x = element_text(angle = 45, vjust = 0.5),
panel.border = element_blank(),
panel.grid.minor = element_blank(),
panel.grid.major = element_blank(),
panel.spacing = unit(1.5, "lines"),
# remove the facet background color
strip.text.x = element_text(size = 13), # element_blank(),
strip.text.y = element_blank(),
strip.background = element_blank(),
strip.placement = "outside",
) +
NULL
# ggsave('plot_decode_LO_area_inex.pdf', plot_inex_LO_area, width = 8, height = 16)
plot_inex_LO_area
# annotation text for the E1 (first two stimulus levels) panels
dat_text_inex_LO_area_lr_fw <- data.frame(
Stimuli = levels(df_inex_LO_area$Stimuli)[1:2],
Hemisphere = "left",
label = c("Chinese speakers: \nfaces",
"Chinese speakers: \nChinese characters"),
x = .5, # c(1, 1.1),
y = 1.05
)
# E1-only version of the intact-vs-exchange area plot (left panel of the
# combined figure below)
plot_inex_LO_area_E1 <- ggplot(filter(df_inex_LO_area, Exp == "E1"), aes(y = mean, x = Area)) +
geom_col(position = "dodge", width = .5, fill = "#CDCDC8") +
facet_grid(Stimuli ~ Hemisphere, scales = "free_x", space = "free_x",
switch = "x",
labeller = labeller(Hemisphere = c(left = "left hemisphere", right = "right hemisphere"))) +
geom_errorbar(mapping = aes(ymin = lower.CL, ymax = upper.CL), linetype = 1, # set the error bar
show.legend = FALSE, width = 0.25, alpha = .5,
position = position_dodge(width=0.9)) +
geom_hline(yintercept = 0.5, linetype = 5, alpha = 0.5) + # add the line for 0.5 and 1 (y)
# scale_x_discrete(labels = toTitleCase(df_0$Stimuli)) +
scale_y_continuous(expand= c(0, 0), breaks = seq(0, 1, .25)) + # remove the space between columns and x axis
coord_cartesian(ylim = c(0.4, 1.1)) +
labs(x = expression(paste("Lateral Occipital Label Area (", mm^2, ")")), y = "Accuracy") + # set the names for main, x and y axises Experiment 1
geom_text(aes(label = sig_ast(p)), size = 7, nudge_y = 0.15) + # add starts to the significant columns
# geom_text(aes(label = round(mean, 2)), size = 4, nudge_y = 0.2) +
geom_text(data = dat_text_inex_LO_area_lr_fw, mapping = aes(x = x, y = y, label = label), size = 4.5, fontface = "bold", hjust=0) +
theme_bw() +
theme(
plot.title = element_text(lineheight=.8, face="bold", size = 24, hjust = 0.5, vjust = -1),
# plot.margin = margin(5, 170, 60, 40, unit = "pt"),
text = element_text(colour="black"),
axis.text = element_text(colour="black"),
axis.text.x = element_text(face = "bold", size = 16),
axis.text.y = element_text(size = 13),
axis.title.x = element_text(face = "bold", size = 20), # the size of the texts in plot
axis.title.y = element_text(size = 17, vjust = 2.5), # the size of the texts in plot
axis.line.x = element_line(colour = 'black', size=0.5, linetype='solid'),
axis.line.y = element_line(colour = 'black', size=0.5, linetype='solid'), # , arrow = arrow(length = unit(0.3, "cm"))
# axis.text.x = element_text(angle = 45, vjust = 0.5),
panel.border = element_blank(),
panel.grid.minor = element_blank(),
panel.grid.major = element_blank(),
panel.spacing = unit(1.5, "lines"),
# remove the facet background color
strip.text.x = element_text(size = 15), # element_blank(),
strip.text.y = element_blank(),
strip.background = element_blank(),
strip.placement = "outside",
) +
NULL
# annotation text for the E2 (last two stimulus levels) panels
dat_text_inex_LO_area_lr_ec <- data.frame(
Stimuli = levels(df_inex_LO_area$Stimuli)[3:4],
Hemisphere = "left",
label = c("English speakers: \nEnglish words",
"English speakers: \nChinese characters"), # levels(df_inex_LO_area$Stimuli)[3:4],
x = .5, # c(1.1, 1.2),
y = 1.05
)
# E2-only version of the intact-vs-exchange area plot (right panel of the
# combined figure below)
plot_inex_LO_area_E2 <- ggplot(filter(df_inex_LO_area, Exp == "E2"), aes(y = mean, x = Area)) +
geom_col(position = "dodge", width = .5, fill = "#CDCDC8") +
facet_grid(Stimuli ~ Hemisphere, scales = "free_x", space = "free_x",
switch = "x",
labeller = labeller(Hemisphere = c(left = "left hemisphere", right = "right hemisphere"))) +
geom_errorbar(mapping = aes(ymin = lower.CL, ymax = upper.CL), linetype = 1, # set the error bar
show.legend = FALSE, width = 0.25, alpha = .5,
position = position_dodge(width=0.9)) +
geom_hline(yintercept = 0.5, linetype = 5, alpha = 0.5) + # add the line for 0.5 and 1 (y)
# scale_x_discrete(labels = toTitleCase(df_0$Stimuli)) +
scale_y_continuous(expand= c(0, 0), breaks = seq(0, 1, .25)) + # remove the space between columns and x axis
coord_cartesian(ylim = c(0.4, 1.1)) +
labs(x = expression(paste("Lateral Occipital Label Area (", mm^2, ")")), y = "Accuracy") + # set the names for main, x and y axises Experiment 2
geom_text(aes(label = sig_ast(p)), size = 7, nudge_y = 0.15) + # add starts to the significant columns
# geom_text(aes(label = round(mean, 2)), size = 4, nudge_y = 0.2) +
geom_text(data = dat_text_inex_LO_area_lr_ec, mapping = aes(x = x, y = y, label = label), size = 4.5, fontface = "bold", hjust=0) +
theme_bw() +
theme(
plot.title = element_text(lineheight=.8, face="bold", size = 24, hjust = 0.5, vjust = -1),
# plot.margin = margin(5, 170, 60, 40, unit = "pt"),
text = element_text(colour="black"),
axis.text = element_text(colour="black"),
axis.text.x = element_text(face = "bold", size = 16),
axis.text.y = element_text(size = 13),
axis.title.x = element_text(face = "bold", size = 20), # the size of the texts in plot
axis.title.y = element_text(size = 17, vjust = 2.5), # the size of the texts in plot
axis.line.x = element_line(colour = 'black', size=0.5, linetype='solid'),
axis.line.y = element_line(colour = 'black', size=0.5, linetype='solid'), # , arrow = arrow(length = unit(0.3, "cm"))
# axis.text.x = element_text(angle = 45, vjust = 0.5),
panel.border = element_blank(),
panel.grid.minor = element_blank(),
panel.grid.major = element_blank(),
panel.spacing = unit(1.5, "lines"),
# remove the facet background color
strip.text.x = element_text(size = 15), # element_blank(),
strip.text.y = element_blank(),
strip.background = element_blank(),
strip.placement = "outside",
) +
NULL
# arrange the E1 and E2 intact-vs-exchange panels side by side
plot_inex_LO_area_lr <- ggarrange(plot_inex_LO_area_E1, plot_inex_LO_area_E2, ncol = 2,
# labels = c("", "Intact vs. Exchange"),
# label.x = -0.36,
# label.y = 1,
font.label = list(size = 24))
# ggsave('plot_decode_LO_area_inex_lr.pdf', plot_inex_LO_area_lr, width = 15, height = 8)
plot_inex_LO_area_lr
# keep within-stimulus pairs for the part comparison (top vs. bottom for
# faces/Chinese; left vs. right for English words)
df_parts_LO_area <- df_decode_LO_area %>%
filter(str_detect(Stimuli, "vs.", negate = TRUE),
Layout != "Intact\nvs.\nExchange") %>%
mutate(Stimuli = as_factor(Stimuli))
# move English, then Chinese, to the end of the factor order
df_parts_LO_area$Stimuli <- fct_relevel(df_parts_LO_area$Stimuli, "English", after = Inf)
df_parts_LO_area$Stimuli <- fct_relevel(df_parts_LO_area$Stimuli, "Chinese", after = Inf)
# facet annotation text, drawn at (x = .5, y = 1.05) in the left-hemisphere facets
dat_parts_inex_LO_area <- data.frame(
Stimuli = levels(df_parts_LO_area$Stimuli),
Hemisphere = c("left"),
label = c("Chinese speakers: \nfaces",
"Chinese speakers: \nChinese characters",
"English speakers: \nEnglish words",
"English speakers: \nChinese characters"),
x = .5, # c(1, 1.1, 1.2, 1.2),
y = 1.05
)
# Bar plot of part (top/bottom or left/right) decoding accuracy across LO
# label areas, faceted by stimulus (rows) and hemisphere (columns), with
# 95% CI error bars, a chance line at 0.5, and significance asterisks.
plot_topbottom_LO_area <- ggplot(df_parts_LO_area, aes(y = mean, x = Area)) +
geom_col(position = "dodge", width = .5, fill = "#CDCDC8") +
facet_grid(Stimuli ~ Hemisphere, scales = "free_x", space = "free_x",
switch = "x",
labeller = labeller(Hemisphere = c(left = "left hemisphere", right = "right hemisphere"))) +
geom_errorbar(mapping = aes(ymin = lower.CL, ymax = upper.CL), linetype = 1, # set the error bar
show.legend = FALSE, width = 0.25, alpha = .5,
position = position_dodge(width=0.9)) +
geom_hline(yintercept = 0.5, linetype = 5, alpha = 0.5) + # add the line for 0.5 and 1 (y)
# scale_x_discrete(labels = toTitleCase(df_0$Stimuli)) +
scale_y_continuous(expand= c(0, 0), breaks = seq(0, 1, .25)) + # remove the space between columns and x axis
coord_cartesian(ylim = c(0.4, 1.1)) +
labs(title = "Top vs. bottom; left vs. right", x = expression(paste("Lateral Occipital Label Area (", mm^2, ")")), y = "Accuracy") + # set the names for main, x and y axises
geom_text(aes(label = sig_ast(p)), size = 7, nudge_y = 0.15) + # add starts to the significant columns
# geom_text(aes(label = round(mean, 2)), size = 4, nudge_y = 0.2) +
geom_text(data = dat_parts_inex_LO_area, mapping = aes(x = x, y = y, label = label), size = 6, fontface = "bold", hjust=0) +
theme_bw() +
theme(
plot.title = element_text(lineheight=.8, face="bold", size = 24, hjust = 0.5, vjust = -1),
# plot.margin = margin(5, 170, 60, 40, unit = "pt"),
text = element_text(colour="black"),
axis.text = element_text(colour="black"),
axis.text.x = element_text(face = "bold", size = 16),
axis.text.y = element_text(size = 13),
axis.title.x = element_text(face = "bold", size = 20), # the size of the texts in plot
axis.title.y = element_text(size = 17, vjust = 2.5), # the size of the texts in plot
axis.line.x = element_line(colour = 'black', size=0.5, linetype='solid'),
axis.line.y = element_line(colour = 'black', size=0.5, linetype='solid'), # , arrow = arrow(length = unit(0.3, "cm"))
# axis.text.x = element_text(angle = 45, vjust = 0.5),
panel.border = element_blank(),
panel.grid.minor = element_blank(),
panel.grid.major = element_blank(),
panel.spacing = unit(1.5, "lines"),
# remove the facet background color
strip.text.x = element_text(size = 15), # element_blank(),
strip.text.y = element_blank(),
strip.background = element_blank(),
strip.placement = "outside",
) +
NULL
# ggsave('plot_decode_LO_area_topbottom.pdf', plot_topbottom_LO_area, width = 8, height = 16)
plot_topbottom_LO_area
# Annotation text for the E1-only LO-area part-decoding plot: the first two
# Stimuli levels (faces, Chinese characters), left-hemisphere facet only.
dat_text_tb_all_lr_E1 <- data.frame(
  Stimuli = levels(df_parts_LO_area$Stimuli)[1:2],
  Hemisphere = "left",
  label = c("Chinese speakers: \nfaces",
            "Chinese speakers: \nChinese characters"),
  x = 0.5,  # text anchor on the x axis (hjust = 0 in the geom)
  y = 1.05  # just above the accuracy ceiling of 1
)
# Experiment 1 only: part decoding (top vs. bottom) in LO against label area.
# Same layout as plot_topbottom_LO_area but without a title, for use in the
# combined two-panel figure below.
plot_topbottom_LO_area_E1 <- ggplot(filter(df_parts_LO_area, Exp == "E1"), aes(y = mean, x = Area)) +
geom_col(position = "dodge", width = .5, fill = "#CDCDC8") +
facet_grid(Stimuli ~ Hemisphere, scales = "free_x", space = "free_x",
switch = "x",
labeller = labeller(Hemisphere = c(left = "left hemisphere", right = "right hemisphere"))) +
geom_errorbar(mapping = aes(ymin = lower.CL, ymax = upper.CL), linetype = 1, # set the error bar
show.legend = FALSE, width = 0.25, alpha = .5,
position = position_dodge(width=0.9)) +
geom_hline(yintercept = 0.5, linetype = 5, alpha = 0.5) + # dashed reference line at chance level (0.5)
# scale_x_discrete(labels = toTitleCase(df_0$Stimuli)) +
scale_y_continuous(expand= c(0, 0), breaks = seq(0, 1, .25)) + # remove the space between columns and x axis
coord_cartesian(ylim = c(0.4, 1.1)) +
labs(x = expression(paste("Lateral Occipital Label Area (", mm^2, ")")), y = "Accuracy") + # axis names (Experiment 1)
geom_text(aes(label = sig_ast(p)), size = 7, nudge_y = 0.15) + # add stars to the significant columns
# geom_text(aes(label = round(mean, 2)), size = 4, nudge_y = 0.2) +
geom_text(data = dat_text_tb_all_lr_E1, mapping = aes(x = x, y = y, label = label), size = 4.5, fontface = "bold", hjust=0) + # per-panel annotation text
theme_bw() +
theme(
plot.title = element_text(lineheight=.8, face="bold", size = 24, hjust = 0.5, vjust = -1),
# plot.margin = margin(5, 170, 60, 40, unit = "pt"),
text = element_text(colour="black"),
axis.text = element_text(colour="black"),
axis.text.x = element_text(face = "bold", size = 16),
axis.text.y = element_text(size = 13),
axis.title.x = element_text(face = "bold", size = 20), # the size of the texts in plot
axis.title.y = element_text(size = 17, vjust = 2.5), # the size of the texts in plot
axis.line.x = element_line(colour = 'black', size=0.5, linetype='solid'),
axis.line.y = element_line(colour = 'black', size=0.5, linetype='solid'), # , arrow = arrow(length = unit(0.3, "cm"))
# axis.text.x = element_text(angle = 45, vjust = 0.5),
panel.border = element_blank(),
panel.grid.minor = element_blank(),
panel.grid.major = element_blank(),
panel.spacing = unit(1.5, "lines"),
# remove the facet background color
strip.text.x = element_text(size = 15), # element_blank(),
strip.text.y = element_blank(),
strip.background = element_blank(),
strip.placement = "outside",
) +
NULL
# Annotation text for the E2-only LO-area part-decoding plot: the last two
# Stimuli levels (English words, Chinese characters), left-hemisphere facet.
dat_text_tb_all_lr_E2 <- data.frame(
  Stimuli = levels(df_parts_LO_area$Stimuli)[3:4],
  Hemisphere = "left",
  label = c("English speakers: \nEnglish words",
            "English speakers: \nChinese characters"),
  x = 0.5,  # text anchor on the x axis (hjust = 0 in the geom)
  y = 1.05  # just above the accuracy ceiling of 1
)
# Experiment 2 only: part decoding (left vs. right for English words,
# top vs. bottom for Chinese characters) in LO against label area.
plot_topbottom_LO_area_E2 <- ggplot(filter(df_parts_LO_area, Exp == "E2"), aes(y = mean, x = Area)) +
geom_col(position = "dodge", width = .5, fill = "#CDCDC8") +
facet_grid(Stimuli ~ Hemisphere, scales = "free_x", space = "free_x",
switch = "x",
labeller = labeller(Hemisphere = c(left = "left hemisphere", right = "right hemisphere"))) +
geom_errorbar(mapping = aes(ymin = lower.CL, ymax = upper.CL), linetype = 1, # set the error bar
show.legend = FALSE, width = 0.25, alpha = .5,
position = position_dodge(width=0.9)) +
geom_hline(yintercept = 0.5, linetype = 5, alpha = 0.5) + # dashed reference line at chance level (0.5)
# scale_x_discrete(labels = toTitleCase(df_0$Stimuli)) +
scale_y_continuous(expand= c(0, 0), breaks = seq(0, 1, .25)) + # remove the space between columns and x axis
coord_cartesian(ylim = c(0.4, 1.1)) +
labs(x = expression(paste("Lateral Occipital Label Area (", mm^2, ")")), y = "Accuracy") + # axis names (Experiment 2)
geom_text(aes(label = sig_ast(p)), size = 7, nudge_y = 0.15) + # add stars to the significant columns
# geom_text(aes(label = round(mean, 2)), size = 4, nudge_y = 0.2) +
geom_text(data = dat_text_tb_all_lr_E2, mapping = aes(x = x, y = y, label = label), size = 4.5, fontface = "bold", hjust=0) + # per-panel annotation text
theme_bw() +
theme(
plot.title = element_text(lineheight=.8, face="bold", size = 24, hjust = 0.5, vjust = -1),
# plot.margin = margin(5, 170, 60, 40, unit = "pt"),
text = element_text(colour="black"),
axis.text = element_text(colour="black"),
axis.text.x = element_text(face = "bold", size = 16),
axis.text.y = element_text(size = 13),
axis.title.x = element_text(face = "bold", size = 20), # the size of the texts in plot
axis.title.y = element_text(size = 17, vjust = 2.5), # the size of the texts in plot
axis.line.x = element_line(colour = 'black', size=0.5, linetype='solid'),
axis.line.y = element_line(colour = 'black', size=0.5, linetype='solid'), # , arrow = arrow(length = unit(0.3, "cm"))
# axis.text.x = element_text(angle = 45, vjust = 0.5),
panel.border = element_blank(),
panel.grid.minor = element_blank(),
panel.grid.major = element_blank(),
panel.spacing = unit(1.5, "lines"),
# remove the facet background color
strip.text.x = element_text(size = 15), # element_blank(),
strip.text.y = element_blank(),
strip.background = element_blank(),
strip.placement = "outside",
) +
NULL
# Side-by-side figure: E1 and E2 part-decoding LO-area plots.
plot_topbottom_LO_area_lr <- ggarrange(
  plot_topbottom_LO_area_E1,
  plot_topbottom_LO_area_E2,
  ncol = 2,
  font.label = list(size = 24)
)
# ggsave('plot_decode_LO_area_topbottom_lr.pdf', plot_topbottom_LO_area_lr, width = 15, height = 8)
plot_topbottom_LO_area_lr
# combine all decoding results
# Stack the per-ROI decoding summaries from both experiments, then derive
# readable Stimuli / Layout columns from the classifier pair identifier.
# (Consistency fix: the mutate previously mixed if_else() and ifelse();
# if_else() is used throughout — type-stable, all branches are character.)
df_decoding <- rbind(mutate(one_decode_agg_E1_FFA1, Exp = "E1", ROI = "FFA1"),
                     mutate(one_decode_agg_E1_FFA2, Exp = "E1", ROI = "FFA2"),
                     mutate(one_decode_agg_E1_VWFA, Exp = "E1", ROI = "VWFA"),
                     mutate(one_decode_agg_E1_LO, Exp = "E1", ROI = "LO"),
                     mutate(one_decode_agg_E2_FFA1, Exp = "E2", ROI = "FFA1"),
                     mutate(one_decode_agg_E2_FFA2, Exp = "E2", ROI = "FFA2"),
                     mutate(one_decode_agg_E2_VWFA, Exp = "E2", ROI = "VWFA"),
                     mutate(one_decode_agg_E2_LO, Exp = "E2", ROI = "LO")) %>%
  # ClassifyPair encodes "<stim1>.<layout1>.<stim2>.<layout2>"; split on the
  # default non-alphanumeric separator.
  separate(ClassifyPair, c("Stimuli1", "Layout1", "Stimuli2", "Layout2")) %>%
  mutate(Stimuli1 = if_else(Stimuli1 %in% c("face", "word"), paste0(Stimuli1, "s"), Stimuli1),
         Stimuli2 = if_else(Stimuli2 %in% c("face", "word"), paste0(Stimuli2, "s"), Stimuli2),
         # within-stimulus pairs collapse to a single name; cross-stimulus
         # pairs become "A\nvs.\nB" (newlines used as facet-label line breaks)
         Stimuli = if_else(Stimuli1 == Stimuli2, Stimuli1,
                           paste(Stimuli1, Stimuli2, sep = "\nvs.\n")),
         Layout = if_else(Layout1 == Layout2, Layout1,
                          paste(toTitleCase(Layout1), toTitleCase(Layout2), sep = "\nvs.\n"))) %>%
  select(-c(Stimuli1, Stimuli2, Layout1, Layout2))
# df_decoding$ROI <- fct_relevel(df_decoding$ROI, "LO", after = Inf)
# xaxislabel <- strsplit(unique(df_0$Stimuli), "\nvs.\n")[[1]]
# Intact-layout decoding only (e.g. intact English vs. intact Chinese), with
# the Stimuli panel order reversed for plotting.
df_intact <- df_decoding %>%
  filter(Layout == "intact") %>%
  mutate(Stimuli = fct_rev(Stimuli))
# Annotation text for the intact-decoding plot: one label per cross-stimulus
# pair, drawn only in the left-hemisphere facet column.
dat_text_intact_all <- data.frame(
  Stimuli = c("faces\nvs.\nwords", "English\nvs.\nChinese"),
  Hemisphere = "left",
  label = c("Chinese speakers: \nfaces vs. Chinese characters",
            "English speakers: \nEnglish words vs. Chinese characters"),
  x = 0.5,  # text anchor on the x axis (hjust = 0 in the geom)
  y = 1.05  # just above the accuracy ceiling of 1
)
# intact
# Cross-stimulus decoding of intact stimuli (E1: faces vs. Chinese
# characters; E2: English words vs. Chinese characters) across the four ROIs.
plot_intact_all <- ggplot(df_intact, aes(y = mean, x = ROI)) +
geom_col(position = "dodge", width = .5, fill = "#CDCDC8") +
facet_grid(Stimuli ~ Hemisphere, scales = "free_x", space = "free_x",
switch = "x",
labeller = labeller(Hemisphere = c(left = "left hemisphere", right = "right hemisphere"))) +
geom_errorbar(mapping = aes(ymin = lower.CL, ymax = upper.CL), linetype = 1, # set the error bar
show.legend = FALSE, width = 0.25, alpha = .5,
position = position_dodge(width=0.9)) +
geom_hline(yintercept = 0.5, linetype = 5, alpha = 0.5) + # dashed reference line at chance level (0.5)
scale_y_continuous(expand= c(0, 0), breaks = seq(0, 1, .25)) + # remove the space between columns and x axis
coord_cartesian(ylim = c(0.4, 1.1)) +
labs(x = "ROIs", y = "Accuracy") + # set the names for main, x and y axes
geom_text(aes(label = sig_ast(p)), size = 7, nudge_y = 0.15) + # add stars to the significant columns
# geom_text(aes(label = round(mean, 2)), size = 4, nudge_y = 0.2) +
geom_text(data = dat_text_intact_all, aes(x = x, y = y, label = label), hjust = 0, size = 4, fontface = "bold") + # per-panel annotation text
theme_bw() +
theme(
plot.title = element_text(lineheight=.8, face="bold", size = 24, hjust = 0.5, vjust = -1),
# plot.margin = margin(5, 170, 60, 40, unit = "pt"),
text = element_text(colour="black"),
axis.text = element_text(colour="black"),
axis.text.x = element_text(face = "bold", size = 16),
axis.text.y = element_text(size = 13),
axis.title.x = element_text(face = "bold", size = 20), # the size of the texts in plot
axis.title.y = element_text(size = 17, vjust = 2.5), # the size of the texts in plot
axis.line.x = element_line(colour = 'black', size=0.5, linetype='solid'),
axis.line.y = element_line(colour = 'black', size=0.5, linetype='solid'), # , arrow = arrow(length = unit(0.3, "cm"))
# axis.text.x = element_text(angle = 45, vjust = 0.5),
panel.border = element_blank(),
panel.grid.minor = element_blank(),
panel.grid.major = element_blank(),
panel.spacing = unit(1.5, "lines"),
# remove the facet background color
strip.text.x = element_text(size = 15),
strip.text.y = element_blank(),
strip.background = element_blank(),
strip.placement = "outside",
) +
NULL
# ggsave('plot_decode_all_intact.pdf', plot_intact_all, width = 8, height = 8)
plot_intact_all
# Intact-vs-exchange decoding within each stimulus type (cross-stimulus
# "A vs. B" rows are excluded).
df_inex <- df_decoding %>%
  # fixed() matches the literal string "vs."; the previous bare "vs." pattern
  # was a regex in which "." matches any character.
  filter(str_detect(Stimuli, fixed("vs."), negate = TRUE),
         Layout == "Intact\nvs.\nExchange")
# panel order: faces and words first, then English and Chinese moved to the end
df_inex$Stimuli <- fct_relevel(df_inex$Stimuli, "English", after = Inf)
df_inex$Stimuli <- fct_relevel(df_inex$Stimuli, "Chinese", after = Inf)
# Annotation text for the intact-vs-exchange decoding plot: one label per
# Stimuli level, drawn only in the left-hemisphere facet column.
dat_text_inex_all <- data.frame(
  Stimuli = levels(df_inex$Stimuli),
  Hemisphere = "left",
  label = c("Chinese speakers: \nfaces",
            "Chinese speakers: \nChinese characters",
            "English speakers: \nEnglish words",
            "English speakers: \nChinese characters"),
  x = 0.5,  # text anchor on the x axis (hjust = 0 in the geom)
  y = 1.05  # just above the accuracy ceiling of 1
)
# Intact-vs-exchange decoding accuracy across the four ROIs, one row of
# panels per stimulus type (both experiments combined in one figure).
plot_inex_all <- ggplot(df_inex, aes(y = mean, x = ROI)) +
geom_col(position = "dodge", width = .5, fill = "#CDCDC8") +
facet_grid(Stimuli ~ Hemisphere, scales = "free_x", space = "free_x",
switch = "x",
labeller = labeller(Hemisphere = c(left = "left hemisphere", right = "right hemisphere"))) +
geom_errorbar(mapping = aes(ymin = lower.CL, ymax = upper.CL), linetype = 1, # set the error bar
show.legend = FALSE, width = 0.25, alpha = .5,
position = position_dodge(width=0.9)) +
geom_hline(yintercept = 0.5, linetype = 5, alpha = 0.5) + # dashed reference line at chance level (0.5)
# scale_x_discrete(labels = toTitleCase(df_0$Stimuli)) +
scale_y_continuous(expand= c(0, 0), breaks = seq(0, 1, .25)) + # remove the space between columns and x axis
coord_cartesian(ylim = c(0.4, 1.1)) +
labs(x = "ROIs", y = "Accuracy") + # axis names; title ("Intact vs. Exchange") intentionally omitted
geom_text(aes(label = sig_ast(p)), size = 7, nudge_y = 0.15) + # add stars to the significant columns
# geom_text(aes(label = round(mean, 2)), size = 4, nudge_y = 0.2) +
geom_text(data = dat_text_inex_all, mapping = aes(x = x, y = y, label = label), size = 4.5, fontface = "bold", hjust = 0) + # per-panel annotation text
theme_bw() +
theme(
plot.title = element_text(lineheight=.8, face="bold", size = 24, hjust = 0.5, vjust = -1),
# plot.margin = margin(5, 170, 60, 40, unit = "pt"),
text = element_text(colour="black"),
axis.text = element_text(colour="black"),
axis.text.x = element_text(face = "bold", size = 16),
axis.text.y = element_text(size = 13),
axis.title.x = element_text(face = "bold", size = 20), # the size of the texts in plot
axis.title.y = element_text(size = 17, vjust = 2.5), # the size of the texts in plot
axis.line.x = element_line(colour = 'black', size=0.5, linetype='solid'),
axis.line.y = element_line(colour = 'black', size=0.5, linetype='solid'), # , arrow = arrow(length = unit(0.3, "cm"))
# axis.text.x = element_text(angle = 45, vjust = 0.5),
panel.border = element_blank(),
panel.grid.minor = element_blank(),
panel.grid.major = element_blank(),
panel.spacing = unit(1.5, "lines"),
# remove the facet background color
strip.text.x = element_text(size = 13), # element_blank(),
strip.text.y = element_blank(),
strip.background = element_blank(),
strip.placement = "outside",
) +
NULL
# ggsave('plot_decode_all_inex.pdf', plot_inex_all, width = 8, height = 16)
plot_inex_all
# Annotation text for the E1-only intact-vs-exchange plot: the first two
# Stimuli levels (faces, Chinese characters), left-hemisphere facet only.
dat_text_inex_all_lr_fw <- data.frame(
  Stimuli = levels(df_inex$Stimuli)[1:2],
  Hemisphere = "left",
  label = c("Chinese speakers: \nfaces",
            "Chinese speakers: \nChinese characters"),
  x = 0.5,  # text anchor on the x axis (hjust = 0 in the geom)
  y = 1.05  # just above the accuracy ceiling of 1
)
# Experiment 1 only: intact-vs-exchange decoding accuracy across ROIs.
plot_inex_E1 <- ggplot(filter(df_inex, Exp == "E1"), aes(y = mean, x = ROI)) +
geom_col(position = "dodge", width = .5, fill = "#CDCDC8") +
facet_grid(Stimuli ~ Hemisphere, scales = "free_x", space = "free_x",
switch = "x",
labeller = labeller(Hemisphere = c(left = "left hemisphere", right = "right hemisphere"))) +
geom_errorbar(mapping = aes(ymin = lower.CL, ymax = upper.CL), linetype = 1, # set the error bar
show.legend = FALSE, width = 0.25, alpha = .5,
position = position_dodge(width=0.9)) +
geom_hline(yintercept = 0.5, linetype = 5, alpha = 0.5) + # dashed reference line at chance level (0.5)
# scale_x_discrete(labels = toTitleCase(df_0$Stimuli)) +
scale_y_continuous(expand= c(0, 0), breaks = seq(0, 1, .25)) + # remove the space between columns and x axis
coord_cartesian(ylim = c(0.4, 1.1)) +
labs(x = "ROIs", y = "Accuracy") + # axis names (Experiment 1); title intentionally omitted
geom_text(aes(label = sig_ast(p)), size = 7, nudge_y = 0.15) + # add stars to the significant columns
# geom_text(aes(label = round(mean, 2)), size = 4, nudge_y = 0.2) +
geom_text(data = dat_text_inex_all_lr_fw, mapping = aes(x = x, y = y, label = label), size = 4.5, fontface = "bold", hjust = 0) + # per-panel annotation text
theme_bw() +
theme(
plot.title = element_text(lineheight=.8, face="bold", size = 24, hjust = 0.5, vjust = -1),
# plot.margin = margin(5, 170, 60, 40, unit = "pt"),
text = element_text(colour="black"),
axis.text = element_text(colour="black"),
axis.text.x = element_text(face = "bold", size = 16),
axis.text.y = element_text(size = 13),
axis.title.x = element_text(face = "bold", size = 20), # the size of the texts in plot
axis.title.y = element_text(size = 17, vjust = 2.5), # the size of the texts in plot
axis.line.x = element_line(colour = 'black', size=0.5, linetype='solid'),
axis.line.y = element_line(colour = 'black', size=0.5, linetype='solid'), # , arrow = arrow(length = unit(0.3, "cm"))
# axis.text.x = element_text(angle = 45, vjust = 0.5),
panel.border = element_blank(),
panel.grid.minor = element_blank(),
panel.grid.major = element_blank(),
panel.spacing = unit(1.5, "lines"),
# remove the facet background color
strip.text.x = element_text(size = 15), # element_blank(),
strip.text.y = element_blank(),
strip.background = element_blank(),
strip.placement = "outside",
) +
NULL
# Annotation text for the E2-only intact-vs-exchange plot: the last two
# Stimuli levels (English words, Chinese characters), left-hemisphere facet.
dat_text_inex_all_lr_ec <- data.frame(
  Stimuli = levels(df_inex$Stimuli)[3:4],
  Hemisphere = "left",
  label = c("English speakers: \nEnglish words",
            "English speakers: \nChinese characters"),
  x = 0.5,  # text anchor on the x axis (hjust = 0 in the geom)
  y = 1.05  # just above the accuracy ceiling of 1
)
# Experiment 2 only: intact-vs-exchange decoding accuracy across ROIs.
plot_inex_E2 <- ggplot(filter(df_inex, Exp == "E2"), aes(y = mean, x = ROI)) +
geom_col(position = "dodge", width = .5, fill = "#CDCDC8") +
facet_grid(Stimuli ~ Hemisphere, scales = "free_x", space = "free_x",
switch = "x",
labeller = labeller(Hemisphere = c(left = "left hemisphere", right = "right hemisphere"))) +
geom_errorbar(mapping = aes(ymin = lower.CL, ymax = upper.CL), linetype = 1, # set the error bar
show.legend = FALSE, width = 0.25, alpha = .5,
position = position_dodge(width=0.9)) +
geom_hline(yintercept = 0.5, linetype = 5, alpha = 0.5) + # dashed reference line at chance level (0.5)
# scale_x_discrete(labels = toTitleCase(df_0$Stimuli)) +
scale_y_continuous(expand= c(0, 0), breaks = seq(0, 1, .25)) + # remove the space between columns and x axis
coord_cartesian(ylim = c(0.4, 1.1)) +
labs(x = "ROIs", y = "Accuracy") + # axis names (Experiment 2); title intentionally omitted
geom_text(aes(label = sig_ast(p)), size = 7, nudge_y = 0.15) + # add stars to the significant columns
# geom_text(aes(label = round(mean, 2)), size = 4, nudge_y = 0.2) +
geom_text(data = dat_text_inex_all_lr_ec, mapping = aes(x = x, y = y, label = label), size = 4.5, fontface = "bold", hjust = 0) + # per-panel annotation text
theme_bw() +
theme(
plot.title = element_text(lineheight=.8, face="bold", size = 24, hjust = 0.5, vjust = -1),
# plot.margin = margin(5, 170, 60, 40, unit = "pt"),
text = element_text(colour="black"),
axis.text = element_text(colour="black"),
axis.text.x = element_text(face = "bold", size = 16),
axis.text.y = element_text(size = 13),
axis.title.x = element_text(face = "bold", size = 20), # the size of the texts in plot
axis.title.y = element_text(size = 17, vjust = 2.5), # the size of the texts in plot
axis.line.x = element_line(colour = 'black', size=0.5, linetype='solid'),
axis.line.y = element_line(colour = 'black', size=0.5, linetype='solid'), # , arrow = arrow(length = unit(0.3, "cm"))
# axis.text.x = element_text(angle = 45, vjust = 0.5),
panel.border = element_blank(),
panel.grid.minor = element_blank(),
panel.grid.major = element_blank(),
panel.spacing = unit(1.5, "lines"),
# remove the facet background color
strip.text.x = element_text(size = 15), # element_blank(),
strip.text.y = element_blank(),
strip.background = element_blank(),
strip.placement = "outside",
) +
NULL
# Side-by-side figure: E1 and E2 intact-vs-exchange ROI plots.
plot_inex_lr <- ggarrange(
  plot_inex_E1,
  plot_inex_E2,
  ncol = 2,
  font.label = list(size = 24)
)
# ggsave('plot_decode_all_inex_lr.pdf', plot_inex_lr, width = 15, height = 8)
plot_inex_lr
# Part-decoding data across ROIs: keep within-stimulus rows only (drop
# cross-stimulus "A vs. B" pairs) and drop the intact-vs-exchange layout,
# leaving the top/bottom (and left/right) part conditions.
df_parts <- df_decoding %>%
  # fixed() matches the literal string "vs."; the previous bare "vs." pattern
  # was a regex in which "." matches any character.
  filter(str_detect(Stimuli, fixed("vs."), negate = TRUE),
         Layout != "Intact\nvs.\nExchange") %>%
  mutate(Stimuli = as_factor(Stimuli))
# panel order: faces and words first, then English and Chinese moved to the end
df_parts$Stimuli <- fct_relevel(df_parts$Stimuli, "English", after = Inf)
df_parts$Stimuli <- fct_relevel(df_parts$Stimuli, "Chinese", after = Inf)
# Annotation text for the part-decoding ROI plot: one label per Stimuli
# level, drawn only in the left-hemisphere facet column.
dat_parts_inex_all <- data.frame(
  Stimuli = levels(df_parts$Stimuli),
  Hemisphere = "left",
  label = c("Chinese speakers: \nfaces",
            "Chinese speakers: \nChinese characters",
            "English speakers: \nEnglish words",
            "English speakers: \nChinese characters"),
  x = 0.5,  # text anchor on the x axis (hjust = 0 in the geom)
  y = 1.05  # just above the accuracy ceiling of 1
)
# Part decoding (top vs. bottom; left vs. right for English words) across the
# four ROIs, both experiments combined in one figure.
plot_topbottom_all <- ggplot(df_parts, aes(y = mean, x = ROI)) +
geom_col(position = "dodge", width = .5, fill = "#CDCDC8") +
facet_grid(Stimuli ~ Hemisphere, scales = "free_x", space = "free_x",
switch = "x",
labeller = labeller(Hemisphere = c(left = "left hemisphere", right = "right hemisphere"))) +
geom_errorbar(mapping = aes(ymin = lower.CL, ymax = upper.CL), linetype = 1, # set the error bar
show.legend = FALSE, width = 0.25, alpha = .5,
position = position_dodge(width=0.9)) +
geom_hline(yintercept = 0.5, linetype = 5, alpha = 0.5) + # dashed reference line at chance level (0.5)
# scale_x_discrete(labels = toTitleCase(df_0$Stimuli)) +
scale_y_continuous(expand= c(0, 0), breaks = seq(0, 1, .25)) + # remove the space between columns and x axis
coord_cartesian(ylim = c(0.4, 1.1)) +
labs(title = "Top vs. bottom; left vs. right", x = "ROIs", y = "Accuracy") + # set the names for main, x and y axes
geom_text(aes(label = sig_ast(p)), size = 7, nudge_y = 0.15) + # add stars to the significant columns
# geom_text(aes(label = round(mean, 2)), size = 4, nudge_y = 0.2) +
geom_text(data = dat_parts_inex_all, mapping = aes(x = x, y = y, label = label), size = 6, fontface = "bold", hjust = 0) + # per-panel annotation text
theme_bw() +
theme(
plot.title = element_text(lineheight=.8, face="bold", size = 24, hjust = 0.5, vjust = -1),
# plot.margin = margin(5, 170, 60, 40, unit = "pt"),
text = element_text(colour="black"),
axis.text = element_text(colour="black"),
axis.text.x = element_text(face = "bold", size = 16),
axis.text.y = element_text(size = 13),
axis.title.x = element_text(face = "bold", size = 20), # the size of the texts in plot
axis.title.y = element_text(size = 17, vjust = 2.5), # the size of the texts in plot
axis.line.x = element_line(colour = 'black', size=0.5, linetype='solid'),
axis.line.y = element_line(colour = 'black', size=0.5, linetype='solid'), # , arrow = arrow(length = unit(0.3, "cm"))
# axis.text.x = element_text(angle = 45, vjust = 0.5),
panel.border = element_blank(),
panel.grid.minor = element_blank(),
panel.grid.major = element_blank(),
panel.spacing = unit(1.5, "lines"),
# remove the facet background color
strip.text.x = element_text(size = 15), # element_blank(),
strip.text.y = element_blank(),
strip.background = element_blank(),
strip.placement = "outside",
) +
NULL
# ggsave('plot_decode_all_topbottom.pdf', plot_topbottom_all, width = 8, height = 16)
plot_topbottom_all
# Annotation text for the E1-only part-decoding ROI plot (note: this
# overwrites the earlier object of the same name used by the LO-area plots;
# the definitions must run in order).
dat_text_tb_all_lr_E1 <- data.frame(
  Stimuli = levels(df_parts$Stimuli)[1:2],
  Hemisphere = "left",
  label = c("Chinese speakers: \nfaces",
            "Chinese speakers: \nChinese characters"),
  x = 0.5,  # text anchor on the x axis (hjust = 0 in the geom)
  y = 1.05  # just above the accuracy ceiling of 1
)
# Experiment 1 only: part decoding (top vs. bottom) across ROIs.
plot_topbottom_E1 <- ggplot(filter(df_parts, Exp == "E1"), aes(y = mean, x = ROI)) +
geom_col(position = "dodge", width = .5, fill = "#CDCDC8") +
facet_grid(Stimuli ~ Hemisphere, scales = "free_x", space = "free_x",
switch = "x",
labeller = labeller(Hemisphere = c(left = "left hemisphere", right = "right hemisphere"))) +
geom_errorbar(mapping = aes(ymin = lower.CL, ymax = upper.CL), linetype = 1, # set the error bar
show.legend = FALSE, width = 0.25, alpha = .5,
position = position_dodge(width=0.9)) +
geom_hline(yintercept = 0.5, linetype = 5, alpha = 0.5) + # dashed reference line at chance level (0.5)
# scale_x_discrete(labels = toTitleCase(df_0$Stimuli)) +
scale_y_continuous(expand= c(0, 0), breaks = seq(0, 1, .25)) + # remove the space between columns and x axis
coord_cartesian(ylim = c(0.4, 1.1)) +
labs(x = "ROIs", y = "Accuracy") + # axis names (Experiment 1)
geom_text(aes(label = sig_ast(p)), size = 7, nudge_y = 0.15) + # add stars to the significant columns
# geom_text(aes(label = round(mean, 2)), size = 4, nudge_y = 0.2) +
geom_text(data = dat_text_tb_all_lr_E1, mapping = aes(x = x, y = y, label = label), size = 4.5, fontface = "bold", hjust =0) + # per-panel annotation text
theme_bw() +
theme(
plot.title = element_text(lineheight=.8, face="bold", size = 24, hjust = 0.5, vjust = -1),
# plot.margin = margin(5, 170, 60, 40, unit = "pt"),
text = element_text(colour="black"),
axis.text = element_text(colour="black"),
axis.text.x = element_text(face = "bold", size = 16),
axis.text.y = element_text(size = 13),
axis.title.x = element_text(face = "bold", size = 20), # the size of the texts in plot
axis.title.y = element_text(size = 17, vjust = 2.5), # the size of the texts in plot
axis.line.x = element_line(colour = 'black', size=0.5, linetype='solid'),
axis.line.y = element_line(colour = 'black', size=0.5, linetype='solid'), # , arrow = arrow(length = unit(0.3, "cm"))
# axis.text.x = element_text(angle = 45, vjust = 0.5),
panel.border = element_blank(),
panel.grid.minor = element_blank(),
panel.grid.major = element_blank(),
panel.spacing = unit(1.5, "lines"),
# remove the facet background color
strip.text.x = element_text(size = 15), # element_blank(),
strip.text.y = element_blank(),
strip.background = element_blank(),
strip.placement = "outside",
) +
NULL
# Annotation text for the E2-only part-decoding ROI plot (note: this
# overwrites the earlier object of the same name used by the LO-area plots;
# the definitions must run in order).
dat_text_tb_all_lr_E2 <- data.frame(
  Stimuli = levels(df_parts$Stimuli)[3:4],
  Hemisphere = "left",
  label = c("English speakers: \nEnglish words",
            "English speakers: \nChinese characters"),
  x = 0.5,  # text anchor on the x axis (hjust = 0 in the geom)
  y = 1.05  # just above the accuracy ceiling of 1
)
# Experiment 2 only: part decoding (left vs. right for English words,
# top vs. bottom for Chinese characters) across ROIs.
plot_topbottom_E2 <- ggplot(filter(df_parts, Exp == "E2"), aes(y = mean, x = ROI)) +
geom_col(position = "dodge", width = .5, fill = "#CDCDC8") +
facet_grid(Stimuli ~ Hemisphere, scales = "free_x", space = "free_x",
switch = "x",
labeller = labeller(Hemisphere = c(left = "left hemisphere", right = "right hemisphere"))) +
geom_errorbar(mapping = aes(ymin = lower.CL, ymax = upper.CL), linetype = 1, # set the error bar
show.legend = FALSE, width = 0.25, alpha = .5,
position = position_dodge(width=0.9)) +
geom_hline(yintercept = 0.5, linetype = 5, alpha = 0.5) + # dashed reference line at chance level (0.5)
# scale_x_discrete(labels = toTitleCase(df_0$Stimuli)) +
scale_y_continuous(expand= c(0, 0), breaks = seq(0, 1, .25)) + # remove the space between columns and x axis
coord_cartesian(ylim = c(0.4, 1.1)) +
labs(x = "ROIs", y = "Accuracy") + # axis names (Experiment 2)
geom_text(aes(label = sig_ast(p)), size = 7, nudge_y = 0.15) + # add stars to the significant columns
# geom_text(aes(label = round(mean, 2)), size = 4, nudge_y = 0.2) +
geom_text(data = dat_text_tb_all_lr_E2, mapping = aes(x = x, y = y, label = label), size = 4.5, fontface = "bold", hjust=0) + # per-panel annotation text
theme_bw() +
theme(
plot.title = element_text(lineheight=.8, face="bold", size = 24, hjust = 0.5, vjust = -1),
# plot.margin = margin(5, 170, 60, 40, unit = "pt"),
text = element_text(colour="black"),
axis.text = element_text(colour="black"),
axis.text.x = element_text(face = "bold", size = 16),
axis.text.y = element_text(size = 13),
axis.title.x = element_text(face = "bold", size = 20), # the size of the texts in plot
axis.title.y = element_text(size = 17, vjust = 2.5), # the size of the texts in plot
axis.line.x = element_line(colour = 'black', size=0.5, linetype='solid'),
axis.line.y = element_line(colour = 'black', size=0.5, linetype='solid'), # , arrow = arrow(length = unit(0.3, "cm"))
# axis.text.x = element_text(angle = 45, vjust = 0.5),
panel.border = element_blank(),
panel.grid.minor = element_blank(),
panel.grid.major = element_blank(),
panel.spacing = unit(1.5, "lines"),
# remove the facet background color
strip.text.x = element_text(size = 15), # element_blank(),
strip.text.y = element_blank(),
strip.background = element_blank(),
strip.placement = "outside",
) +
NULL
# Side-by-side figure: E1 and E2 part-decoding ROI plots.
plot_topbottom_lr <- ggarrange(
  plot_topbottom_E1,
  plot_topbottom_E2,
  ncol = 2,
  font.label = list(size = 24)
)
# ggsave('plot_decode_all_topbottom_lr.pdf', plot_topbottom_lr, width = 15, height = 8)
plot_topbottom_lr
# Univariate activation descriptives for the intact/exchange conditions,
# stacked across both experiments and the four ROIs.
df_uni_ie <- rbind(mutate(desp_uni_E1_FFA1_ie, Exp = "E1", ROI = "FFA1"),
mutate(desp_uni_E1_FFA2_ie, Exp = "E1", ROI = "FFA2"),
mutate(desp_uni_E1_VWFA_ie, Exp = "E1", ROI = "VWFA"),
mutate(desp_uni_E1_LO_ie, Exp = "E1", ROI = "LO"),
mutate(desp_uni_E2_FFA1_ie, Exp = "E2", ROI = "FFA1"),
mutate(desp_uni_E2_FFA2_ie, Exp = "E2", ROI = "FFA2"),
mutate(desp_uni_E2_VWFA_ie, Exp = "E2", ROI = "VWFA"),
mutate(desp_uni_E2_LO_ie, Exp = "E2", ROI = "LO")) %>%
mutate(Stimuli = FaceWord,
HemiROI = paste0(toTitleCase(substr(Hemisphere,1,1)), ROI)) # hemisphere initial + ROI, e.g. "LFFA1" (assumes toTitleCase upper-cases the single letter — confirm)
# Simple-effect tests (intact vs. exchange) per experiment, ROI and
# hemisphere. VWFA appears only for the left hemisphere — presumably no
# right-hemisphere VWFA ROI was defined; confirm against the ROI definitions.
df_uni_ie_sig <- rbind(mutate(as_tibble(simple_E1_lFFA1_ie), Exp = "E1", ROI = "FFA1", Hemisphere = "left"),
mutate(as_tibble(simple_E1_lFFA2_ie), Exp = "E1", ROI = "FFA2", Hemisphere = "left"),
mutate(as_tibble(simple_E1_VWFA_ie), Exp = "E1", ROI = "VWFA", Hemisphere = "left"),
mutate(as_tibble(simple_E1_lLO_ie), Exp = "E1", ROI = "LO", Hemisphere = "left"),
mutate(as_tibble(simple_E2_lFFA1_ie), Exp = "E2", ROI = "FFA1", Hemisphere = "left"),
mutate(as_tibble(simple_E2_lFFA2_ie), Exp = "E2", ROI = "FFA2", Hemisphere = "left"),
mutate(as_tibble(simple_E2_VWFA_ie), Exp = "E2", ROI = "VWFA", Hemisphere = "left"),
mutate(as_tibble(simple_E2_lLO_ie), Exp = "E2", ROI = "LO", Hemisphere = "left"),
mutate(as_tibble(simple_E1_rFFA1_ie), Exp = "E1", ROI = "FFA1", Hemisphere = "right"),
mutate(as_tibble(simple_E1_rFFA2_ie), Exp = "E1", ROI = "FFA2", Hemisphere = "right"),
mutate(as_tibble(simple_E1_rLO_ie), Exp = "E1", ROI = "LO", Hemisphere = "right"),
mutate(as_tibble(simple_E2_rFFA1_ie), Exp = "E2", ROI = "FFA1", Hemisphere = "right"),
mutate(as_tibble(simple_E2_rFFA2_ie), Exp = "E2", ROI = "FFA2", Hemisphere = "right"),
mutate(as_tibble(simple_E2_rLO_ie), Exp = "E2", ROI = "LO", Hemisphere = "right")) %>%
filter(FaceWord != ".") %>% # drop rows where FaceWord is the "." placeholder — presumably the contrast collapsed across stimulus type; confirm
mutate(Stimuli = FaceWord,
HemiROI = paste0(toTitleCase(substr(Hemisphere,1,1)), ROI),
Layout = "intact") %>% # tag p-values with Layout = "intact" so each star attaches to the intact bar only
select(Exp, Layout, Stimuli, HemiROI, p.value)
# Merge the simple-effect p-values into the descriptives (left_join matches
# on the shared columns: Exp, Layout, Stimuli, HemiROI); bars without a test
# (e.g. the exchange bars) get p = 1 so sig_ast() draws no star for them.
df_uni_ie <- left_join(df_uni_ie, df_uni_ie_sig) %>%
mutate(p.value = if_else(is.na(p.value), 1, p.value),
Stimuli = as_factor(Stimuli))
df_uni_ie$Layout <- fct_relevel(df_uni_ie$Layout, "exchange", after = Inf) # intact bar first, exchange bar second
# df_inex_uni$Stimuli <- fct_relevel(df_inex_uni$Stimuli, "English", after = Inf)
# df_inex_uni$Stimuli <- fct_relevel(df_inex_uni$Stimuli, "Chinese", after = Inf)
# Annotation text for the univariate intact/exchange plot. NOTE: this
# overwrites the earlier `dat_text_inex_all` used by the decoding plots
# above; keep the definitions in execution order.
dat_text_inex_all <- data.frame(
Stimuli = levels(df_uni_ie$Stimuli),
HemiROI = c("lFFA1"), # NOTE(review): HemiROI values in df_uni_ie are built with toTitleCase and so look like "LFFA1"; "lFFA1" would not match and facet_grid would add an extra panel column — confirm
label = c("Chinese speakers: \nfaces",
"Chinese speakers: \nChinese characters",
"English speakers: \nEnglish words",
"English speakers: \nChinese characters"), # levels(df_uni_ie$Stimuli),
x = .5, # c(1, 1.1, 1.2, 1.2),
y = 2.65 # near the top of the activation axis (ylim goes to activationUL)
)
# Univariate activation (estimated marginal means) for intact vs. exchanged
# layouts: one facet column per hemisphere-ROI, one row per stimulus type.
# Bars = emmean, error bars = CI, stars = simple-effect significance.
# Fix: the y axis plots emmean (activation, scaled to activationUL), not
# decoding accuracy, so it is labelled "Activation" instead of "Accuracy".
plot_uni_inex_all <- ggplot(df_uni_ie, aes(y = emmean, x = Layout)) +
geom_col(position = "dodge", width = .5, fill = "#CDCDC8") +
facet_grid(Stimuli ~ HemiROI, scales = "free_x", space = "free_x",
switch = "x",
labeller = labeller(Hemisphere = c(left = "left hemisphere", right = "right hemisphere"))) +
geom_errorbar(mapping = aes(ymin = lower.CL, ymax = upper.CL), linetype = 1, # set the error bar
show.legend = FALSE, width = 0.25, alpha = .5,
position = position_dodge(width=0.9)) +
# scale_x_discrete(labels = toTitleCase(df_0$Stimuli)) +
# scale_y_continuous(expand= c(0, 0), breaks = seq(0, 1, .25)) + # remove the space between columns and x axis
coord_cartesian(ylim = c(0, activationUL)) + # activation scale, not the 0-1 accuracy scale
labs(title = "Intact vs. Exchanged", x = "ROIs", y = "Activation") + # y shows estimated marginal means of activation (was mislabelled "Accuracy")
geom_text(aes(label = sig_ast(p.value)), size = 7, nudge_y = 0.65, nudge_x = 0.5) + # stars drawn between the intact and exchange bars
# geom_text(aes(label = round(mean, 2)), size = 4, nudge_y = 0.2) +
geom_text(data = dat_text_inex_all, mapping = aes(x = x, y = y, label = label), size = 7, fontface = "bold", hjust =0) + # per-panel annotation text
theme_bw() +
theme(
plot.title = element_text(lineheight=.8, face="bold", size = 24, hjust = 0.5, vjust = -1),
# plot.margin = margin(5, 170, 60, 40, unit = "pt"),
text = element_text(colour="black"),
axis.text = element_text(colour="black"),
axis.text.x = element_text(face = "bold", size = 16),
axis.text.y = element_text(size = 13),
axis.title.x = element_text(face = "bold", size = 20), # the size of the texts in plot
axis.title.y = element_text(size = 17, vjust = 2.5), # the size of the texts in plot
axis.line.x = element_line(colour = 'black', size=0.5, linetype='solid'),
axis.line.y = element_line(colour = 'black', size=0.5, linetype='solid'), # , arrow = arrow(length = unit(0.3, "cm"))
# axis.text.x = element_text(angle = 45, vjust = 0.5),
panel.border = element_blank(),
panel.grid.minor = element_blank(),
panel.grid.major = element_blank(),
panel.spacing = unit(1.5, "lines"),
# remove the facet background color
strip.text.x = element_text(size = 13), # element_blank(),
strip.text.y = element_blank(),
strip.background = element_blank(),
strip.placement = "outside",
) +
NULL
# ggsave('plot_uni_all_inex.pdf', plot_uni_inex_all, width = 8, height = 16)
plot_uni_inex_all
# Facet annotations for the hemisphere-faceted version of the plot,
# anchored in the left-hemisphere column at the "intact" x position.
dat_text_inex_all_ <- data.frame(
  Stimuli = levels(df_uni_ie$Stimuli),
  Hemisphere = "left",
  Layout = "intact",
  label = c(
    "Chinese speakers: \nfaces",
    "Chinese speakers: \nChinese characters",
    "English speakers: \nEnglish words",
    "English speakers: \nChinese characters"
  ),
  x = 0.5,
  y = 2.65
)
# Same univariate data as plot_uni_inex_all, but faceted by Stimuli x
# Hemisphere with ROI on the x axis and Layout mapped to the fill color.
plot_uni_inex_all_ <- ggplot(df_uni_ie, aes(y = emmean, x = ROI, fill = Layout)) +
  geom_col(position = "dodge", width = .5) +
  scale_fill_manual(values = c("#C0C0C0", "#A9A9A9")) +
  facet_grid(Stimuli ~ Hemisphere, scales = "free_x", space = "free_x",
             switch = "x",
             labeller = labeller(Hemisphere = c(left = "left hemisphere", right = "right hemisphere"))) +
  # error bars show the confidence intervals taken from the emmeans output
  geom_errorbar(mapping = aes(ymin = lower.CL, ymax = upper.CL), linetype = 1,
                show.legend = FALSE, width = 0.25, alpha = .5,
                position = position_dodge(width = 0.5)) +
  coord_cartesian(ylim = c(0, activationUL)) +
  # FIX: y shows beta estimates (emmean), not decoding accuracy; the label
  # previously said "Accuracy" while the companion plots of the same data
  # use "Beta values".
  labs(title = "Intact vs. Exchanged", x = "ROIs", y = "Beta values") +
  # add significance stars above the columns
  geom_text(aes(label = sig_ast(p.value)), size = 7, nudge_y = 0.65) +
  # per-facet annotation text (speaker group and stimulus type)
  geom_text(data = dat_text_inex_all_, mapping = aes(x = x, y = y, label = label), size = 7, fontface = "bold", hjust = 0) +
  theme_bw() +
  theme(
    plot.title = element_text(lineheight = .8, face = "bold", size = 24, hjust = 0.5, vjust = -1),
    text = element_text(colour = "black", size = 18),
    axis.text = element_text(colour = "black"),
    axis.text.x = element_text(face = "bold", size = 16),
    axis.title.x = element_text(face = "bold", size = 20),
    axis.title.y = element_text(size = 17, vjust = 2.5),
    axis.line.x = element_line(colour = 'black', size = 0.5, linetype = 'solid'),
    axis.line.y = element_line(colour = 'black', size = 0.5, linetype = 'solid'),
    legend.position = "bottom",
    panel.border = element_blank(),
    panel.grid.minor = element_blank(),
    panel.grid.major = element_blank(),
    panel.spacing = unit(1.5, "lines"),
    # remove the facet background color
    strip.text.x = element_text(size = 13),
    strip.text.y = element_blank(),
    strip.background = element_blank(),
    strip.placement = "outside",
  ) +
  NULL
# ggsave('plot_uni_all_inex.pdf', plot_uni_inex_all, width = 8, height = 16)
plot_uni_inex_all_
# Experiment 1 subset (Chinese participants: faces and Chinese characters)
# with unused factor levels dropped.
df_uni_ie_1 <- df_uni_ie %>%
  filter(Stimuli %in% c("faces", "words")) %>%
  droplevels()
# Facet annotations for the E1 panel (left hemisphere, intact position).
dat_text_inex_all_lr1 <- data.frame(
  Stimuli = levels(df_uni_ie_1$Stimuli),
  Hemisphere = "left",
  Layout = "intact",
  label = c(
    "Chinese speakers: \nfaces",
    "Chinese speakers: \nChinese characters"
  ),
  x = 0.5,
  y = 2.65
)
# E1 panel: beta estimates (emmean) for intact vs. exchange, left hemisphere,
# ROI on the x axis, Layout as fill; later combined with the E2 panel by
# ggarrange into plot_uni_ie_lr.
plot_uni_inex_all_lr1 <- ggplot(df_uni_ie_1, aes(y = emmean, x = ROI, fill = Layout)) +
geom_col(position = "dodge", width = .5) + # , fill = "#CDCDC8"
scale_fill_manual(values = c("#CDCDC8", "#A9A9A9")) +
facet_grid(Stimuli ~ Hemisphere, scales = "free_x", space = "free_x",
switch = "x",
labeller = labeller(Hemisphere = c(left = "left hemisphere", right = "right hemisphere"))) +
geom_errorbar(mapping = aes(ymin = lower.CL, ymax = upper.CL), linetype = 1, # set the error bar (CIs from emmeans)
show.legend = FALSE, width = 0.25, alpha = .5,
position = position_dodge(width=0.5)) +
# scale_x_discrete(labels = toTitleCase(df_0$Stimuli)) +
# scale_y_continuous(expand= c(0, 0), breaks = seq(0, 1, .25)) + # remove the space between columns and x axis
coord_cartesian(ylim = c(0, activationUL)) +
labs(x = "ROIs", y = "Beta values") + # set the names for the x and y axes
geom_text(aes(label = sig_ast(p.value)), size = 7, nudge_y = 0.65) + # add stars to the significant columns
# geom_text(aes(label = round(mean, 2)), size = 4, nudge_y = 0.2) +
geom_text(data = dat_text_inex_all_lr1, mapping = aes(x = x, y = y, label = label), size = 4.5, fontface = "bold", hjust=0) +
theme_bw() +
theme(
plot.title = element_text(lineheight=.8, face="bold", size = 24, hjust = 0.5, vjust = -1),
text = element_text(colour="black", size = 18),
axis.text = element_text(colour="black"),
axis.text.x = element_text(face = "bold", size = 16),
axis.text.y = element_text(size = 18),
axis.title.x = element_text(face = "bold", size = 20), # the size of the texts in plot
axis.title.y = element_text(size = 17, vjust = 2.5), # the size of the texts in plot
axis.line.x = element_line(colour = 'black', size=0.5, linetype='solid'),
axis.line.y = element_line(colour = 'black', size=0.5, linetype='solid'), # , arrow = arrow(length = unit(0.3, "cm"))
# axis.text.x = element_text(angle = 45, vjust = 0.5),
legend.position = "bottom",
panel.border = element_blank(),
panel.grid.minor = element_blank(),
panel.grid.major = element_blank(),
panel.spacing = unit(1.5, "lines"),
# remove the facet background color
strip.text.x = element_text(size = 18), # element_blank(),
strip.text.y = element_blank(),
strip.background = element_blank(),
strip.placement = "outside",
) +
NULL
# Experiment 2 subset (English participants: English words and Chinese
# characters) with unused factor levels dropped.
df_uni_ie_2 <- df_uni_ie %>%
  filter(Stimuli %in% c("English", "Chinese")) %>%
  droplevels()
# Facet annotations for the E2 panel (left hemisphere, intact position).
dat_text_inex_all_lr2 <- data.frame(
  Stimuli = levels(df_uni_ie_2$Stimuli),
  Hemisphere = "left",
  Layout = "intact",
  label = c(
    "English speakers: \nEnglish words",
    "English speakers: \nChinese characters"
  ),
  x = 0.5,
  y = 2.65
)
# E2 panel: beta estimates (emmean) for intact vs. exchange, left hemisphere,
# ROI on the x axis, Layout as fill; combined with the E1 panel below.
plot_uni_inex_all_lr2 <- ggplot(df_uni_ie_2, aes(y = emmean, x = ROI, fill = Layout)) +
geom_col(position = "dodge", width = .5) + # , fill = "#CDCDC8"
scale_fill_manual(values = c("#CDCDC8", "#A9A9A9")) +
facet_grid(Stimuli ~ Hemisphere, scales = "free_x", space = "free_x",
switch = "x",
labeller = labeller(Hemisphere = c(left = "left hemisphere", right = "right hemisphere"))) +
geom_errorbar(mapping = aes(ymin = lower.CL, ymax = upper.CL), linetype = 1, # set the error bar (CIs from emmeans)
show.legend = FALSE, width = 0.25, alpha = .5,
position = position_dodge(width=0.5)) +
# scale_x_discrete(labels = toTitleCase(df_0$Stimuli)) +
# scale_y_continuous(expand= c(0, 0), breaks = seq(0, 1, .25)) + # remove the space between columns and x axis
coord_cartesian(ylim = c(0, activationUL)) +
labs(x = "ROIs", y = "Beta values") + # set the names for the x and y axes
geom_text(aes(label = sig_ast(p.value)), size = 7, nudge_y = 0.65) + # add stars to the significant columns
# geom_text(aes(label = round(mean, 2)), size = 4, nudge_y = 0.2) +
geom_text(data = dat_text_inex_all_lr2, mapping = aes(x = x, y = y, label = label), size = 4.5, fontface = "bold", hjust=0) +
theme_bw() +
theme(
plot.title = element_text(lineheight=.8, face="bold", size = 24, hjust = 0.5, vjust = -1),
text = element_text(colour="black", size = 18),
axis.text = element_text(colour="black"),
axis.text.x = element_text(face = "bold", size = 16),
axis.text.y = element_text(size = 18),
axis.title.x = element_text(face = "bold", size = 20), # the size of the texts in plot
axis.title.y = element_text(size = 17, vjust = 2.5), # the size of the texts in plot
axis.line.x = element_line(colour = 'black', size=0.5, linetype='solid'),
axis.line.y = element_line(colour = 'black', size=0.5, linetype='solid'), # , arrow = arrow(length = unit(0.3, "cm"))
# axis.text.x = element_text(angle = 45, vjust = 0.5),
legend.position = "bottom",
panel.border = element_blank(),
panel.grid.minor = element_blank(),
panel.grid.major = element_blank(),
panel.spacing = unit(1.5, "lines"),
# remove the facet background color
strip.text.x = element_text(size = 18), # element_blank(),
strip.text.y = element_blank(),
strip.background = element_blank(),
strip.placement = "outside",
) +
NULL
# Arrange the E1 and E2 intact-vs-exchange panels side by side.
plot_uni_ie_lr <- ggarrange(
  plot_uni_inex_all_lr1, plot_uni_inex_all_lr2,
  ncol = 2,
  font.label = list(size = 24)
)
# ggsave('plot_uni_all_ie_lr.pdf', plot_uni_ie_lr, width = 15, height = 9)
plot_uni_ie_lr
# Stack the per-experiment, per-ROI descriptive tables for the top/bottom
# (E1) and partA/partB (E2) conditions, tagging each with its experiment
# and ROI; then derive Stimuli (alias of FaceWord) and a combined
# hemisphere-ROI label (e.g. "LFFA1" from "left" + "FFA1").
df_uni_tb <- rbind(mutate(desp_uni_E1_FFA1_tb, Exp = "E1", ROI = "FFA1"),
mutate(desp_uni_E1_FFA2_tb, Exp = "E1", ROI = "FFA2"),
mutate(desp_uni_E1_VWFA_tb, Exp = "E1", ROI = "VWFA"),
mutate(desp_uni_E1_LO_tb, Exp = "E1", ROI = "LO"),
mutate(desp_uni_E2_FFA1_tb, Exp = "E2", ROI = "FFA1"),
mutate(desp_uni_E2_FFA2_tb, Exp = "E2", ROI = "FFA2"),
mutate(desp_uni_E2_VWFA_tb, Exp = "E2", ROI = "VWFA"),
mutate(desp_uni_E2_LO_tb, Exp = "E2", ROI = "LO")) %>%
mutate(Stimuli = FaceWord,
HemiROI = paste0(toTitleCase(substr(Hemisphere,1,1)), ROI))
# Stack the simple-effect tables for both experiments and all ROIs,
# tagging each with experiment, ROI, and hemisphere. Note VWFA only has a
# left-hemisphere label, so no right-hemisphere VWFA rows exist.
df_uni_tb_sig <- rbind(mutate(as_tibble(simple_E1_lFFA1_tb), Exp = "E1", ROI = "FFA1", Hemisphere = "left"),
mutate(as_tibble(simple_E1_lFFA2_tb), Exp = "E1", ROI = "FFA2", Hemisphere = "left"),
mutate(as_tibble(simple_E1_VWFA_tb), Exp = "E1", ROI = "VWFA", Hemisphere = "left"),
mutate(as_tibble(simple_E1_lLO_tb), Exp = "E1", ROI = "LO", Hemisphere = "left"),
mutate(as_tibble(simple_E2_lFFA1_tb), Exp = "E2", ROI = "FFA1", Hemisphere = "left"),
mutate(as_tibble(simple_E2_lFFA2_tb), Exp = "E2", ROI = "FFA2", Hemisphere = "left"),
mutate(as_tibble(simple_E2_VWFA_tb), Exp = "E2", ROI = "VWFA", Hemisphere = "left"),
mutate(as_tibble(simple_E2_lLO_tb), Exp = "E2", ROI = "LO", Hemisphere = "left"),
mutate(as_tibble(simple_E1_rFFA1_tb), Exp = "E1", ROI = "FFA1", Hemisphere = "right"),
mutate(as_tibble(simple_E1_rFFA2_tb), Exp = "E1", ROI = "FFA2", Hemisphere = "right"),
mutate(as_tibble(simple_E1_rLO_tb), Exp = "E1", ROI = "LO", Hemisphere = "right"),
mutate(as_tibble(simple_E2_rFFA1_tb), Exp = "E2", ROI = "FFA1", Hemisphere = "right"),
mutate(as_tibble(simple_E2_rFFA2_tb), Exp = "E2", ROI = "FFA2", Hemisphere = "right"),
mutate(as_tibble(simple_E2_rLO_tb), Exp = "E2", ROI = "LO", Hemisphere = "right")) %>%
filter(FaceWord != ".") %>%
mutate(Stimuli = FaceWord,
HemiROI = paste0(toTitleCase(substr(Hemisphere,1,1)), ROI),
# the simple effect is attached to one layout per experiment:
# E1 compared top vs. bottom, E2 compared partA vs. partB
Layout = case_when(Exp == "E1" ~ "top",
Exp == "E2" ~ "partA")) %>%
select(Exp, Layout, Stimuli, HemiROI, p.value)
# Attach the p values to the descriptive table. FIX: the join keys are now
# explicit instead of relying on an implicit natural join; the intersection
# of column names is exactly the columns kept by select() above minus
# p.value, so behavior is unchanged but no longer depends on incidental
# column overlap. Rows without a simple-effect test get p.value = 1 so
# sig_ast() renders no star for them.
df_uni_tb <- left_join(df_uni_tb, df_uni_tb_sig,
by = c("Exp", "Layout", "Stimuli", "HemiROI")) %>%
mutate(p.value = if_else(is.na(p.value), 1, p.value),
Stimuli = as_factor(Stimuli))
# Panel annotations for the top/bottom (left/right) plot, anchored in the
# lFFA1 facet column.
dat_text_inex_all <- data.frame(
  Stimuli = levels(df_uni_tb$Stimuli),
  HemiROI = "lFFA1",
  label = c(
    "Chinese speakers: \nfaces",
    "Chinese speakers: \nChinese characters",
    "English speakers: \nEnglish words",
    "English speakers: \nChinese characters"
  ),
  x = 0.5,
  y = 2.65
)
# Collapse the experiment-specific layout names into shared labels:
# top/partA become "Part 1"; bottom/partB become "Part 2".
df_uni_tb_all <- df_uni_tb %>%
  mutate(Layout = case_when(Layout %in% c("top", "partA") ~ "Part 1",
                            Layout %in% c("bottom", "partB") ~ "Part 2"))
# Bar plot of univariate responses (emmean) for the part conditions
# (top/bottom in E1, left/right parts in E2), faceted by Stimuli (rows)
# and hemisphere-ROI (columns).
plot_uni_inex_all <- ggplot(df_uni_tb_all, aes(y = emmean, x = Layout)) +
  geom_col(position = "dodge", width = .5, fill = "#CDCDC8") +
  facet_grid(Stimuli ~ HemiROI, scales = "free_x", space = "free_x",
             switch = "x",
             labeller = labeller(Hemisphere = c(left = "left hemisphere", right = "right hemisphere"))) +
  # error bars show the confidence intervals taken from the emmeans output
  geom_errorbar(mapping = aes(ymin = lower.CL, ymax = upper.CL), linetype = 1,
                show.legend = FALSE, width = 0.25, alpha = .5,
                position = position_dodge(width = 0.9)) +
  coord_cartesian(ylim = c(0, activationUL)) +
  # FIX: y shows beta estimates (emmean), not decoding accuracy; the label
  # previously said "Accuracy" while the companion plots of the same data
  # use "Beta values". (title = "Top vs. Bottom; Left vs. Right" omitted)
  labs(x = "ROIs", y = "Beta values") +
  # add significance stars above the columns
  geom_text(aes(label = sig_ast(p.value)), size = 7, nudge_y = 0.65, nudge_x = 0.5) +
  # per-facet annotation text (speaker group and stimulus type)
  geom_text(data = dat_text_inex_all, mapping = aes(x = x, y = y, label = label), size = 7, fontface = "bold", hjust = 0) +
  theme_bw() +
  theme(
    plot.title = element_text(lineheight = .8, face = "bold", size = 24, hjust = 0.5, vjust = -1),
    text = element_text(colour = "black"),
    axis.text = element_text(colour = "black"),
    axis.text.x = element_text(face = "bold", size = 16),
    axis.text.y = element_text(size = 13),
    axis.title.x = element_text(face = "bold", size = 20),
    axis.title.y = element_text(size = 17, vjust = 2.5),
    axis.line.x = element_line(colour = 'black', size = 0.5, linetype = 'solid'),
    axis.line.y = element_line(colour = 'black', size = 0.5, linetype = 'solid'),
    panel.border = element_blank(),
    panel.grid.minor = element_blank(),
    panel.grid.major = element_blank(),
    panel.spacing = unit(1.5, "lines"),
    # remove the facet background color
    strip.text.x = element_text(size = 13),
    strip.text.y = element_blank(),
    strip.background = element_blank(),
    strip.placement = "outside",
  ) +
  NULL
# ggsave('plot_uni_all_inex.pdf', plot_uni_inex_all, width = 8, height = 16)
plot_uni_inex_all
# Facet annotations for the hemisphere-faceted part plot, anchored in the
# left-hemisphere column at the "Part 1" x position.
dat_text_inex_all_ <- data.frame(
  Stimuli = levels(df_uni_tb$Stimuli),
  Hemisphere = "left",
  Layout = "Part 1",
  label = c(
    "Chinese speakers: \nfaces",
    "Chinese speakers: \nChinese characters",
    "English speakers: \nEnglish words",
    "English speakers: \nChinese characters"
  ),
  x = 0.5,
  y = 2.65
)
# Same part-condition data, faceted by Stimuli x Hemisphere with ROI on
# the x axis and Layout (Part 1 / Part 2) mapped to the fill color.
plot_uni_inex_all_ <- ggplot(df_uni_tb_all, aes(y = emmean, x = ROI, fill = Layout)) +
  geom_col(position = "dodge", width = .5) +
  scale_fill_manual(values = c("#C0C0C0", "#A9A9A9")) +
  facet_grid(Stimuli ~ Hemisphere, scales = "free_x", space = "free_x",
             switch = "x",
             labeller = labeller(Hemisphere = c(left = "left hemisphere", right = "right hemisphere"))) +
  # error bars show the confidence intervals taken from the emmeans output
  geom_errorbar(mapping = aes(ymin = lower.CL, ymax = upper.CL), linetype = 1,
                show.legend = FALSE, width = 0.25, alpha = .5,
                position = position_dodge(width = 0.5)) +
  coord_cartesian(ylim = c(0, activationUL)) +
  # FIX: y shows beta estimates (emmean), not decoding accuracy; the label
  # previously said "Accuracy" while the companion plots of the same data
  # use "Beta values".
  labs(title = "Top vs. Bottom; Left vs. Right", x = "ROIs", y = "Beta values") +
  # add significance stars above the columns
  geom_text(aes(label = sig_ast(p.value)), size = 7, nudge_y = 0.65) +
  # per-facet annotation text (speaker group and stimulus type)
  geom_text(data = dat_text_inex_all_, mapping = aes(x = x, y = y, label = label), size = 7, fontface = "bold", hjust = 0) +
  theme_bw() +
  theme(
    plot.title = element_text(lineheight = .8, face = "bold", size = 24, hjust = 0.5, vjust = -1),
    text = element_text(colour = "black", size = 18),
    axis.text = element_text(colour = "black"),
    axis.text.x = element_text(face = "bold", size = 16),
    axis.title.x = element_text(face = "bold", size = 20),
    axis.title.y = element_text(size = 17, vjust = 2.5),
    axis.line.x = element_line(colour = 'black', size = 0.5, linetype = 'solid'),
    axis.line.y = element_line(colour = 'black', size = 0.5, linetype = 'solid'),
    legend.position = "bottom",
    panel.border = element_blank(),
    panel.grid.minor = element_blank(),
    panel.grid.major = element_blank(),
    panel.spacing = unit(1.5, "lines"),
    # remove the facet background color
    strip.text.x = element_text(size = 13),
    strip.text.y = element_blank(),
    strip.background = element_blank(),
    strip.placement = "outside",
  ) +
  NULL
# ggsave('plot_uni_all_inex.pdf', plot_uni_inex_all, width = 8, height = 16)
plot_uni_inex_all_
# Experiment 1 subset of the part-condition data (faces and Chinese
# characters) with unused factor levels dropped.
df_uni_tb_1 <- df_uni_tb_all %>%
  filter(Stimuli %in% c("faces", "words")) %>%
  droplevels()
# Facet annotations for the E1 panel (left hemisphere, Part 1 position).
dat_text_inex_all_lr1 <- data.frame(
  Stimuli = levels(df_uni_tb_1$Stimuli),
  Hemisphere = "left",
  Layout = "Part 1",
  label = c(
    "Chinese speakers: \nfaces",
    "Chinese speakers: \nChinese characters"
  ),
  x = 0.5,
  y = 2.65
)
# E1 panel: beta estimates (emmean) for Part 1 vs. Part 2, left hemisphere,
# ROI on the x axis, Layout as fill; later combined with the E2 panel by
# ggarrange into plot_uni_tb_lr.
plot_uni_inex_all_lr1 <- ggplot(df_uni_tb_1, aes(y = emmean, x = ROI, fill = Layout)) +
geom_col(position = "dodge", width = .5) + # , fill = "#CDCDC8"
scale_fill_manual(values = c("#CDCDC8", "#A9A9A9")) +
facet_grid(Stimuli ~ Hemisphere, scales = "free_x", space = "free_x",
switch = "x",
labeller = labeller(Hemisphere = c(left = "left hemisphere", right = "right hemisphere"))) +
geom_errorbar(mapping = aes(ymin = lower.CL, ymax = upper.CL), linetype = 1, # set the error bar (CIs from emmeans)
show.legend = FALSE, width = 0.25, alpha = .5,
position = position_dodge(width=0.5)) +
# scale_x_discrete(labels = toTitleCase(df_0$Stimuli)) +
# scale_y_continuous(expand= c(0, 0), breaks = seq(0, 1, .25)) + # remove the space between columns and x axis
coord_cartesian(ylim = c(0, activationUL)) +
labs(x = "ROIs", y = "Beta values") + # set the names for the x and y axes
geom_text(aes(label = sig_ast(p.value)), size = 7, nudge_y = 0.65) + # add stars to the significant columns
# geom_text(aes(label = round(mean, 2)), size = 4, nudge_y = 0.2) +
geom_text(data = dat_text_inex_all_lr1, mapping = aes(x = x, y = y, label = label), size = 4.5, fontface = "bold", hjust=0) +
theme_bw() +
theme(
plot.title = element_text(lineheight=.8, face="bold", size = 24, hjust = 0.5, vjust = -1),
text = element_text(colour="black", size = 18),
axis.text = element_text(colour="black"),
axis.text.x = element_text(face = "bold", size = 16),
axis.text.y = element_text(size = 18),
axis.title.x = element_text(face = "bold", size = 20), # the size of the texts in plot
axis.title.y = element_text(size = 17, vjust = 2.5), # the size of the texts in plot
axis.line.x = element_line(colour = 'black', size=0.5, linetype='solid'),
axis.line.y = element_line(colour = 'black', size=0.5, linetype='solid'), # , arrow = arrow(length = unit(0.3, "cm"))
# axis.text.x = element_text(angle = 45, vjust = 0.5),
legend.position = "bottom",
panel.border = element_blank(),
panel.grid.minor = element_blank(),
panel.grid.major = element_blank(),
panel.spacing = unit(1.5, "lines"),
# remove the facet background color
strip.text.x = element_text(size = 18), # element_blank(),
strip.text.y = element_blank(),
strip.background = element_blank(),
strip.placement = "outside",
) +
NULL
# Experiment 2 subset of the part-condition data (English words and
# Chinese characters) with unused factor levels dropped.
df_uni_tb_2 <- df_uni_tb_all %>%
  filter(Stimuli %in% c("English", "Chinese")) %>%
  droplevels()
# Facet annotations for the E2 panel (left hemisphere, Part 1 position).
dat_text_inex_all_lr2 <- data.frame(
  Stimuli = levels(df_uni_tb_2$Stimuli),
  Hemisphere = "left",
  Layout = "Part 1",
  label = c(
    "English speakers: \nEnglish words",
    "English speakers: \nChinese characters"
  ),
  x = 0.5,
  y = 2.65
)
# E2 panel: beta estimates (emmean) for Part 1 vs. Part 2, left hemisphere,
# ROI on the x axis, Layout as fill; combined with the E1 panel below.
plot_uni_inex_all_lr2 <- ggplot(df_uni_tb_2, aes(y = emmean, x = ROI, fill = Layout)) +
geom_col(position = "dodge", width = .5) + # , fill = "#CDCDC8"
scale_fill_manual(values = c("#CDCDC8", "#A9A9A9")) +
facet_grid(Stimuli ~ Hemisphere, scales = "free_x", space = "free_x",
switch = "x",
labeller = labeller(Hemisphere = c(left = "left hemisphere", right = "right hemisphere"))) +
geom_errorbar(mapping = aes(ymin = lower.CL, ymax = upper.CL), linetype = 1, # set the error bar (CIs from emmeans)
show.legend = FALSE, width = 0.25, alpha = .5,
position = position_dodge(width=0.5)) +
# scale_x_discrete(labels = toTitleCase(df_0$Stimuli)) +
# scale_y_continuous(expand= c(0, 0), breaks = seq(0, 1, .25)) + # remove the space between columns and x axis
coord_cartesian(ylim = c(0, activationUL)) +
labs(x = "ROIs", y = "Beta values") + # set the names for the x and y axes
geom_text(aes(label = sig_ast(p.value)), size = 7, nudge_y = 0.65) + # add stars to the significant columns
# geom_text(aes(label = round(mean, 2)), size = 4, nudge_y = 0.2) +
geom_text(data = dat_text_inex_all_lr2, mapping = aes(x = x, y = y, label = label), size = 4.5, fontface = "bold", hjust=0) +
theme_bw() +
theme(
plot.title = element_text(lineheight=.8, face="bold", size = 24, hjust = 0.5, vjust = -1),
text = element_text(colour="black", size = 18),
axis.text = element_text(colour="black"),
axis.text.x = element_text(face = "bold", size = 16),
axis.text.y = element_text(size = 18),
axis.title.x = element_text(face = "bold", size = 20), # the size of the texts in plot
axis.title.y = element_text(size = 17, vjust = 2.5), # the size of the texts in plot
axis.line.x = element_line(colour = 'black', size=0.5, linetype='solid'),
axis.line.y = element_line(colour = 'black', size=0.5, linetype='solid'), # , arrow = arrow(length = unit(0.3, "cm"))
# axis.text.x = element_text(angle = 45, vjust = 0.5),
legend.position = "bottom",
panel.border = element_blank(),
panel.grid.minor = element_blank(),
panel.grid.major = element_blank(),
panel.spacing = unit(1.5, "lines"),
# remove the facet background color
strip.text.x = element_text(size = 18), # element_blank(),
strip.text.y = element_blank(),
strip.background = element_blank(),
strip.placement = "outside",
) +
NULL
# Arrange the E1 and E2 part-condition panels side by side.
plot_uni_tb_lr <- ggarrange(
  plot_uni_inex_all_lr1, plot_uni_inex_all_lr2,
  ncol = 2,
  font.label = list(size = 24)
)
# ggsave('plot_uni_all_tb_lr.pdf', plot_uni_tb_lr, width = 15, height = 9)
plot_uni_tb_lr
# Record the R version and package versions used for this analysis (the
# "## " lines below are the rendered output of this call).
# rstudioapi::versionInfo()
sessionInfo()
## R version 3.6.3 (2020-02-29)
## Platform: x86_64-apple-darwin15.6.0 (64-bit)
## Running under: macOS Mojave 10.14.5
##
## Matrix products: default
## BLAS: /Library/Frameworks/R.framework/Versions/3.6/Resources/lib/libRblas.0.dylib
## LAPACK: /Library/Frameworks/R.framework/Versions/3.6/Resources/lib/libRlapack.dylib
##
## locale:
## [1] en_US.UTF-8/en_US.UTF-8/en_US.UTF-8/C/en_US.UTF-8/en_US.UTF-8
##
## attached base packages:
## [1] tools stats graphics grDevices utils datasets methods base
##
## other attached packages:
## [1] ggpubr_0.2.5 magrittr_2.0.1 emmeans_1.4.7 lmerTest_3.1-0 afex_0.25-1 lme4_1.1-21 Matrix_1.2-18 forcats_0.4.0 stringr_1.4.0 dplyr_0.8.5 purrr_0.3.3 readr_1.3.1 tidyr_1.0.2 tibble_3.0.1 ggplot2_3.3.0 tidyverse_1.2.1
##
## loaded via a namespace (and not attached):
## [1] httr_1.4.1 jsonlite_1.7.1 splines_3.6.3 carData_3.0-3 modelr_0.1.5 assertthat_0.2.1 cellranger_1.1.0 yaml_2.2.1 numDeriv_2016.8-1.1 pillar_1.4.4 backports_1.1.5 lattice_0.20-38 glue_1.4.2 digest_0.6.27 ggsignif_0.6.0 rvest_0.3.5 minqa_1.2.4 colorspace_1.4-1 cowplot_1.0.0 htmltools_0.5.0 plyr_1.8.6 pkgconfig_2.0.3
## [23] broom_0.5.3.9000 haven_2.2.0 xtable_1.8-4 mvtnorm_1.0-11 scales_1.0.0 openxlsx_4.1.3 rio_0.5.16 generics_0.0.2 car_3.0-5 ellipsis_0.3.1 withr_2.1.2 cli_2.0.2 crayon_1.3.4 readxl_1.3.1 estimability_1.3 evaluate_0.14 fansi_0.4.1 nlme_3.1-144 MASS_7.3-51.5 xml2_1.2.2 foreign_0.8-75 data.table_1.12.6
## [45] hms_0.5.3 lifecycle_0.2.0 munsell_0.5.0 zip_2.0.4 compiler_3.6.3 rlang_0.4.8 grid_3.6.3 nloptr_1.2.1 rstudioapi_0.11 labeling_0.3 rmarkdown_2.1 boot_1.3-24 gtable_0.3.0 abind_1.4-5 curl_4.3 reshape2_1.4.3 R6_2.4.1 lubridate_1.7.4 knitr_1.30 stringi_1.5.3 parallel_3.6.3 Rcpp_1.0.4.6
## [67] vctrs_0.3.1 tidyselect_1.0.0 xfun_0.19 coda_0.19-3
A work by Haiyang Jin